summaryrefslogtreecommitdiffstats
path: root/library/stdarch/crates/stdarch-verify/x86-intel.xml
diff options
context:
space:
mode:
Diffstat (limited to 'library/stdarch/crates/stdarch-verify/x86-intel.xml')
-rw-r--r--library/stdarch/crates/stdarch-verify/x86-intel.xml148137
1 files changed, 148137 insertions, 0 deletions
diff --git a/library/stdarch/crates/stdarch-verify/x86-intel.xml b/library/stdarch/crates/stdarch-verify/x86-intel.xml
new file mode 100644
index 000000000..264ecee0e
--- /dev/null
+++ b/library/stdarch/crates/stdarch-verify/x86-intel.xml
@@ -0,0 +1,148137 @@
+<intrinsics_list version="3.5.3" date="06/30/2020">
+<intrinsic tech="Other" name="_addcarryx_u32">
+ <type>Integer</type>
+ <type>Flag</type>
+ <CPUID>ADX</CPUID>
+ <category>Arithmetic</category>
+ <return type="unsigned char" varname="dst" etype="UI8"/>
+ <parameter type="unsigned char" varname="c_in" etype="UI8"/>
+ <parameter type="unsigned int" varname="a" etype="UI32"/>
+ <parameter type="unsigned int" varname="b" etype="UI32"/>
+ <parameter type="unsigned int *" varname="out" etype="UI32" memwidth="32"/>
+ <description>Add unsigned 32-bit integers "a" and "b" with unsigned 8-bit carry-in "c_in" (carry or overflow flag), and store the unsigned 32-bit result in "out", and the carry-out in "dst" (carry or overflow flag).</description>
+ <operation>
+tmp[32:0] := a[31:0] + b[31:0] + (c_in &gt; 0 ? 1 : 0)
+MEM[out+31:out] := tmp[31:0]
+dst[0] := tmp[32]
+dst[7:1] := 0
+ </operation>
+ <instruction name="ADCX" form="r32, r32" xed="ADCX_GPR32d_GPR32d"/>
+ <instruction name="ADOX" form="r32, r32" xed="ADOX_GPR32d_GPR32d"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_addcarryx_u64">
+ <type>Integer</type>
+ <type>Flag</type>
+ <CPUID>ADX</CPUID>
+ <category>Arithmetic</category>
+ <return type="unsigned char" varname="dst" etype="UI8"/>
+ <parameter type="unsigned char" varname="c_in" etype="UI8"/>
+ <parameter type="unsigned __int64" varname="a" etype="UI64"/>
+ <parameter type="unsigned __int64" varname="b" etype="UI64"/>
+ <parameter type="unsigned __int64 *" varname="out" etype="UI64" memwidth="64"/>
+ <description>Add unsigned 64-bit integers "a" and "b" with unsigned 8-bit carry-in "c_in" (carry or overflow flag), and store the unsigned 64-bit result in "out", and the carry-out in "dst" (carry or overflow flag).</description>
+ <operation>
+tmp[64:0] := a[63:0] + b[63:0] + (c_in &gt; 0 ? 1 : 0)
+MEM[out+63:out] := tmp[63:0]
+dst[0] := tmp[64]
+dst[7:1] := 0
+ </operation>
+ <instruction name="ADCX" form="r64, r64" xed="ADCX_GPR64q_GPR64q"/>
+ <instruction name="ADOX" form="r64, r64" xed="ADOX_GPR64q_GPR64q"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" vexEq="TRUE" name="_mm_aesenc_si128">
+ <type>Integer</type>
+ <CPUID>AES</CPUID>
+ <category>Cryptography</category>
+ <return type="__m128i" varname="dst" etype="M128"/>
+ <parameter type="__m128i" varname="a" etype="M128"/>
+ <parameter type="__m128i" varname="RoundKey" etype="M128"/>
+ <description>Perform one round of an AES encryption flow on data (state) in "a" using the round key in "RoundKey", and store the result in "dst"."</description>
+ <operation>a[127:0] := ShiftRows(a[127:0])
+a[127:0] := SubBytes(a[127:0])
+a[127:0] := MixColumns(a[127:0])
+dst[127:0] := a[127:0] XOR RoundKey[127:0]
+ </operation>
+ <instruction name="AESENC" form="xmm, xmm" xed="AESENC_XMMdq_XMMdq"/>
+ <header>wmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" vexEq="TRUE" name="_mm_aesenclast_si128">
+ <type>Integer</type>
+ <CPUID>AES</CPUID>
+ <category>Cryptography</category>
+ <return type="__m128i" varname="dst" etype="M128"/>
+ <parameter type="__m128i" varname="a" etype="M128"/>
+ <parameter type="__m128i" varname="RoundKey" etype="M128"/>
+ <description>Perform the last round of an AES encryption flow on data (state) in "a" using the round key in "RoundKey", and store the result in "dst"."</description>
+ <operation>a[127:0] := ShiftRows(a[127:0])
+a[127:0] := SubBytes(a[127:0])
+dst[127:0] := a[127:0] XOR RoundKey[127:0]
+ </operation>
+ <instruction name="AESENCLAST" form="xmm, xmm" xed="AESENCLAST_XMMdq_XMMdq"/>
+ <header>wmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" vexEq="TRUE" name="_mm_aesdec_si128">
+ <type>Integer</type>
+ <CPUID>AES</CPUID>
+ <category>Cryptography</category>
+ <return type="__m128i" varname="dst" etype="M128"/>
+ <parameter type="__m128i" varname="a" etype="M128"/>
+ <parameter type="__m128i" varname="RoundKey" etype="M128"/>
+ <description>Perform one round of an AES decryption flow on data (state) in "a" using the round key in "RoundKey", and store the result in "dst".</description>
+ <operation>a[127:0] := InvShiftRows(a[127:0])
+a[127:0] := InvSubBytes(a[127:0])
+a[127:0] := InvMixColumns(a[127:0])
+dst[127:0] := a[127:0] XOR RoundKey[127:0]
+ </operation>
+ <instruction name="AESDEC" form="xmm, xmm" xed="AESDEC_XMMdq_XMMdq"/>
+ <header>wmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" vexEq="TRUE" name="_mm_aesdeclast_si128">
+ <type>Integer</type>
+ <CPUID>AES</CPUID>
+ <category>Cryptography</category>
+ <return type="__m128i" varname="dst" etype="M128"/>
+ <parameter type="__m128i" varname="a" etype="M128"/>
+ <parameter type="__m128i" varname="RoundKey" etype="M128"/>
+ <description>Perform the last round of an AES decryption flow on data (state) in "a" using the round key in "RoundKey", and store the result in "dst".</description>
+ <operation>a[127:0] := InvShiftRows(a[127:0])
+a[127:0] := InvSubBytes(a[127:0])
+dst[127:0] := a[127:0] XOR RoundKey[127:0]
+ </operation>
+ <instruction name="AESDECLAST" form="xmm, xmm" xed="AESDECLAST_XMMdq_XMMdq"/>
+ <header>wmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" vexEq="TRUE" name="_mm_aesimc_si128">
+ <type>Integer</type>
+ <CPUID>AES</CPUID>
+ <category>Cryptography</category>
+ <return type="__m128i" varname="dst" etype="M128"/>
+ <parameter type="__m128i" varname="a" etype="M128"/>
+ <description>Perform the InvMixColumns transformation on "a" and store the result in "dst".</description>
+ <operation>dst[127:0] := InvMixColumns(a[127:0])
+ </operation>
+ <instruction name="AESIMC" form="xmm, xmm" xed="AESIMC_XMMdq_XMMdq"/>
+ <header>wmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" vexEq="TRUE" name="_mm_aeskeygenassist_si128">
+ <type>Integer</type>
+ <CPUID>AES</CPUID>
+ <category>Cryptography</category>
+ <return type="__m128i" varname="dst" etype="M128"/>
+ <parameter type="__m128i" varname="a" etype="M128"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Assist in expanding the AES cipher key by computing steps towards generating a round key for encryption cipher using data from "a" and an 8-bit round constant specified in "imm8", and store the result in "dst"."</description>
+ <operation>X3[31:0] := a[127:96]
+X2[31:0] := a[95:64]
+X1[31:0] := a[63:32]
+X0[31:0] := a[31:0]
+RCON[31:0] := ZeroExtend32(imm8[7:0])
+dst[31:0] := SubWord(X1)
+dst[63:32] := RotWord(SubWord(X1)) XOR RCON
+dst[95:64] := SubWord(X3)
+dst[127:96] := RotWord(SubWord(X3)) XOR RCON
+ </operation>
+ <instruction name="AESKEYGENASSIST" form="xmm, xmm, imm8" xed="AESKEYGENASSIST_XMMdq_XMMdq_IMMb"/>
+ <header>wmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="AMX" name="_tile_dpbf16ps">
+ <type>Tile</type>
+ <type>Floating Point</type>
+ <CPUID>AMXBF16</CPUID>
+ <category>Application-Targeted</category>
+ <return type="void"/>
+ <parameter type="__tile" varname="dst"/>
+ <parameter type="__tile" varname="a"/>
+ <parameter type="__tile" varname="b"/>
+ <description>Compute dot-product of BF16 (16-bit) floating-point pairs in tiles "a" and "b", accumulating the intermediate single-precision (32-bit) floating-point elements with elements in "dst", and store the 32-bit result back to tile "dst".</description>
+ <operation>FOR m := 0 TO dst.rows - 1
+ tmp := dst.row[m]
+ FOR k := 0 TO (a.colsb / 4) - 1
+ FOR n := 0 TO (dst.colsb / 4) - 1
+ tmp.fp32[n] += FP32(a.row[m].bf16[2*k+0]) * FP32(b.row[k].bf16[2*n+0])
+ tmp.fp32[n] += FP32(a.row[m].bf16[2*k+1]) * FP32(b.row[k].bf16[2*n+1])
+ ENDFOR
+ ENDFOR
+ write_row_and_zero(dst, m, tmp, dst.colsb)
+ENDFOR
+zero_upper_rows(dst, dst.rows)
+zero_tileconfig_start()
+ </operation>
+ <instruction name="TDPBF16PS" form="tmm, tmm, tmm" xed="TDPBF16PS_TMMf32_TMMu32_TMMu32"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AMX" name="_tile_dpbsud">
+ <type>Tile</type>
+ <CPUID>AMXINT8</CPUID>
+ <category>Application-Targeted</category>
+ <return type="void"/>
+ <parameter type="__tile" varname="dst"/>
+ <parameter type="__tile" varname="a"/>
+ <parameter type="__tile" varname="b"/>
+ <description>Compute dot-product of bytes in tiles with a source/destination accumulator. Multiply groups of 4 adjacent pairs of signed 8-bit integers in "a" with corresponding unsigned 8-bit integers in "b", producing 4 intermediate 32-bit results. Sum these 4 results with the corresponding 32-bit integer in "dst", and store the 32-bit result back to tile "dst".</description>
+ <operation>DEFINE DPBD(c, x, y) {
+ tmp1 := SignExtend32(x.byte[0]) * ZeroExtend32(y.byte[0])
+ tmp2 := SignExtend32(x.byte[1]) * ZeroExtend32(y.byte[1])
+ tmp3 := SignExtend32(x.byte[2]) * ZeroExtend32(y.byte[2])
+ tmp4 := SignExtend32(x.byte[3]) * ZeroExtend32(y.byte[3])
+
+ RETURN c + tmp1 + tmp2 + tmp3 + tmp4
+}
+FOR m := 0 TO dst.rows - 1
+ tmp := dst.row[m]
+ FOR k := 0 TO (a.colsb / 4) - 1
+ FOR n := 0 TO (dst.colsb / 4) - 1
+ tmp.dword[n] := DPBD(tmp.dword[n], a.row[m].dword[k], b.row[k].dword[n])
+ ENDFOR
+ ENDFOR
+ write_row_and_zero(dst, m, tmp, dst.colsb)
+ENDFOR
+zero_upper_rows(dst, dst.rows)
+zero_tileconfig_start()
+ </operation>
+ <instruction name="TDPBSUD" form="tmm, tmm, tmm" xed="TDPBSUD_TMMi32_TMMu32_TMMu32"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AMX" name="_tile_dpbusd">
+ <type>Tile</type>
+ <CPUID>AMXINT8</CPUID>
+ <category>Application-Targeted</category>
+ <return type="void"/>
+ <parameter type="__tile" varname="dst"/>
+ <parameter type="__tile" varname="a"/>
+ <parameter type="__tile" varname="b"/>
+ <description>Compute dot-product of bytes in tiles with a source/destination accumulator. Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in "a" with corresponding signed 8-bit integers in "b", producing 4 intermediate 32-bit results. Sum these 4 results with the corresponding 32-bit integer in "dst", and store the 32-bit result back to tile "dst".</description>
+ <operation>DEFINE DPBD(c, x, y) {
+ tmp1 := ZeroExtend32(x.byte[0]) * SignExtend32(y.byte[0])
+ tmp2 := ZeroExtend32(x.byte[1]) * SignExtend32(y.byte[1])
+ tmp3 := ZeroExtend32(x.byte[2]) * SignExtend32(y.byte[2])
+ tmp4 := ZeroExtend32(x.byte[3]) * SignExtend32(y.byte[3])
+
+ RETURN c + tmp1 + tmp2 + tmp3 + tmp4
+}
+FOR m := 0 TO dst.rows - 1
+ tmp := dst.row[m]
+ FOR k := 0 TO (a.colsb / 4) - 1
+ FOR n := 0 TO (dst.colsb / 4) - 1
+ tmp.dword[n] := DPBD(tmp.dword[n], a.row[m].dword[k], b.row[k].dword[n])
+ ENDFOR
+ ENDFOR
+ write_row_and_zero(dst, m, tmp, dst.colsb)
+ENDFOR
+zero_upper_rows(dst, dst.rows)
+zero_tileconfig_start()
+ </operation>
+ <instruction name="TDPBUSD" form="tmm, tmm, tmm" xed="TDPBUSD_TMMi32_TMMu32_TMMu32"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AMX" name="_tile_dpbuud">
+ <type>Tile</type>
+ <CPUID>AMXINT8</CPUID>
+ <category>Application-Targeted</category>
+ <return type="void"/>
+ <parameter type="__tile" varname="dst"/>
+ <parameter type="__tile" varname="a"/>
+ <parameter type="__tile" varname="b"/>
+ <description>Compute dot-product of bytes in tiles with a source/destination accumulator. Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in "a" with corresponding unsigned 8-bit integers in "b", producing 4 intermediate 32-bit results. Sum these 4 results with the corresponding 32-bit integer in "dst", and store the 32-bit result back to tile "dst".</description>
+ <operation>DEFINE DPBD(c, x, y) {
+ tmp1 := ZeroExtend32(x.byte[0]) * ZeroExtend32(y.byte[0])
+ tmp2 := ZeroExtend32(x.byte[1]) * ZeroExtend32(y.byte[1])
+ tmp3 := ZeroExtend32(x.byte[2]) * ZeroExtend32(y.byte[2])
+ tmp4 := ZeroExtend32(x.byte[3]) * ZeroExtend32(y.byte[3])
+
+ RETURN c + tmp1 + tmp2 + tmp3 + tmp4
+}
+FOR m := 0 TO dst.rows - 1
+ tmp := dst.row[m]
+ FOR k := 0 TO (a.colsb / 4) - 1
+ FOR n := 0 TO (dst.colsb / 4) - 1
+ tmp.dword[n] := DPBD(tmp.dword[n], a.row[m].dword[k], b.row[k].dword[n])
+ ENDFOR
+ ENDFOR
+ write_row_and_zero(dst, m, tmp, dst.colsb)
+ENDFOR
+zero_upper_rows(dst, dst.rows)
+zero_tileconfig_start()
+ </operation>
+ <instruction name="TDPBUUD" form="tmm, tmm, tmm" xed="TDPBUUD_TMMu32_TMMu32_TMMu32"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AMX" name="_tile_dpbssd">
+ <type>Tile</type>
+ <CPUID>AMXINT8</CPUID>
+ <category>Application-Targeted</category>
+ <return type="void"/>
+ <parameter type="__tile" varname="dst"/>
+ <parameter type="__tile" varname="a"/>
+ <parameter type="__tile" varname="b"/>
+ <description>Compute dot-product of bytes in tiles with a source/destination accumulator. Multiply groups of 4 adjacent pairs of signed 8-bit integers in "a" with corresponding signed 8-bit integers in "b", producing 4 intermediate 32-bit results. Sum these 4 results with the corresponding 32-bit integer in "dst", and store the 32-bit result back to tile "dst".</description>
+ <operation>DEFINE DPBD(c, x, y) {
+ tmp1 := SignExtend32(x.byte[0]) * SignExtend32(y.byte[0])
+ tmp2 := SignExtend32(x.byte[1]) * SignExtend32(y.byte[1])
+ tmp3 := SignExtend32(x.byte[2]) * SignExtend32(y.byte[2])
+ tmp4 := SignExtend32(x.byte[3]) * SignExtend32(y.byte[3])
+
+ RETURN c + tmp1 + tmp2 + tmp3 + tmp4
+}
+FOR m := 0 TO dst.rows - 1
+ tmp := dst.row[m]
+ FOR k := 0 TO (a.colsb / 4) - 1
+ FOR n := 0 TO (dst.colsb / 4) - 1
+ tmp.dword[n] := DPBD(tmp.dword[n], a.row[m].dword[k], b.row[k].dword[n])
+ ENDFOR
+ ENDFOR
+ write_row_and_zero(dst, m, tmp, dst.colsb)
+ENDFOR
+zero_upper_rows(dst, dst.rows)
+zero_tileconfig_start()
+ </operation>
+ <instruction name="TDPBSSD" form="tmm, tmm, tmm" xed="TDPBSSD_TMMi32_TMMu32_TMMu32"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AMX" name="_tile_loadconfig">
+ <type>Tile</type>
+ <CPUID>AMXTILE</CPUID>
+ <category>Application-Targeted</category>
+ <return type="void"/>
+ <parameter type="const void *" varname="mem_addr" memwidth="512"/>
+ <description>Load tile configuration from a 64-byte memory location specified by "mem_addr". The tile configuration format is specified below, and includes the tile type pallette, the number of bytes per row, and the number of rows. If the specified pallette_id is zero, that signifies the init state for both the tile config and the tile data, and the tiles are zeroed. Any invalid configurations will result in #GP fault.</description>
+ <operation>
+// format of memory payload. each field is a byte.
+// 0: palette_id
+// 1: startRow (8b)
+// 2-15: reserved (must be zero)
+// 16-17: tile0.colsb -- bytes_per_row
+// 18-19: tile1.colsb
+// 20-21: tile2.colsb
+// ...
+// 46-47: tile15.colsb
+// 48: tile0.rows
+// 49: tile1.rows
+// 50: tile2.rows
+// ...
+// 63: tile15.rows
+ </operation>
+ <instruction name="LDTILECFG" form="m512" xed="LDTILECFG_MEM"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AMX" name="_tile_storeconfig">
+ <type>Tile</type>
+ <CPUID>AMXTILE</CPUID>
+ <category>Application-Targeted</category>
+ <return type="void"/>
+ <parameter type="void *" varname="mem_addr" memwidth="512"/>
+ <description>Stores the current tile configuration to a 64-byte memory location specified by "mem_addr". The tile configuration format is specified below, and includes the tile type pallette, the number of bytes per row, and the number of rows. If tiles are not configured, all zeroes will be stored to memory.</description>
+ <operation>
+// format of memory payload. each field is a byte.
+// 0: palette_id
+// 1: startRow (8b)
+// 2-15: reserved (must be zero)
+// 16-17: tile0.colsb -- bytes_per_row
+// 18-19: tile1.colsb
+// 20-21: tile2.colsb
+// ...
+// 46-47: tile15.colsb
+// 48: tile0.rows
+// 49: tile1.rows
+// 50: tile2.rows
+// ...
+// 63: tile15.rows
+ </operation>
+ <instruction name="STTILECFG" form="m512" xed="STTILECFG_MEM"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AMX" name="_tile_loadd">
+ <type>Tile</type>
+ <CPUID>AMXTILE</CPUID>
+ <category>Application-Targeted</category>
+ <return type="void"/>
+ <parameter type="__tile" varname="dst"/>
+ <parameter type="const void *" varname="base"/>
+ <parameter type="int" varname="stride" etype="UI32"/>
+ <description>Load tile rows from memory specifieid by "base" address and "stride" into destination tile "dst" using the tile configuration previously configured via "_tile_loadconfig".</description>
+ <operation>start := tileconfig.startRow
+IF start == 0 // not restarting, zero incoming state
+ tilezero(dst)
+FI
+nbytes := dst.colsb
+DO WHILE start &lt; dst.rows
+ memptr := base + start * stride
+ write_row_and_zero(dst, start, read_memory(memptr, nbytes), nbytes)
+ start := start + 1
+OD
+zero_upper_rows(dst, dst.rows)
+zero_tileconfig_start()
+ </operation>
+ <instruction name="TILELOADD" form="tmm, sibmem" xed="TILELOADD_TMMu32_MEMu32"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AMX" name="_tile_stream_loadd">
+ <type>Tile</type>
+ <CPUID>AMXTILE</CPUID>
+ <category>Application-Targeted</category>
+ <return type="void"/>
+ <parameter type="__tile" varname="dst"/>
+ <parameter type="const void *" varname="base"/>
+ <parameter type="int" varname="stride" etype="UI32"/>
+ <description>Load tile rows from memory specifieid by "base" address and "stride" into destination tile "dst" using the tile configuration previously configured via "_tile_loadconfig". This intrinsic provides a hint to the implementation that the data will likely not be reused in the near future and the data caching can be optimized accordingly.</description>
+ <operation>start := tileconfig.startRow
+IF start == 0 // not restarting, zero incoming state
+ tilezero(dst)
+FI
+nbytes := dst.colsb
+DO WHILE start &lt; dst.rows
+ memptr := base + start * stride
+ write_row_and_zero(dst, start, read_memory(memptr, nbytes), nbytes)
+ start := start + 1
+OD
+zero_upper_rows(dst, dst.rows)
+zero_tileconfig_start()
+ </operation>
+ <instruction name="TILELOADDT1" form="tmm, sibmem" xed="TILELOADDT1_TMMu32_MEMu32"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AMX" name="_tile_release">
+ <type>Tile</type>
+ <CPUID>AMXTILE</CPUID>
+ <category>Application-Targeted</category>
+ <return type="void"/>
+ <description>Release the tile configuration to return to the init state, which releases all storage it currently holds.</description>
+ <instruction name="TILERELEASE" xed="TILERELEASE"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AMX" name="_tile_stored">
+ <type>Tile</type>
+ <CPUID>AMXTILE</CPUID>
+ <category>Application-Targeted</category>
+ <return type="void"/>
+ <parameter type="__tile" varname="src" />
+ <parameter type="void *" varname="base"/>
+ <parameter type="int" varname="stride" etype="UI32"/>
+ <description>Store the tile specified by "src" to memory specifieid by "base" address and "stride" using the tile configuration previously configured via "_tile_loadconfig".</description>
+ <operation>start := tileconfig.startRow
+DO WHILE start &lt; src.rows
+ memptr := base + start * stride
+ write_memory(memptr, src.colsb, src.row[start])
+ start := start + 1
+OD
+zero_tileconfig_start()
+ </operation>
+ <instruction name="TILESTORED" form="sibmem, tmm" xed="TILESTORED_MEMu32_TMMu32"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AMX" name="_tile_zero">
+ <type>Tile</type>
+ <CPUID>AMXTILE</CPUID>
+ <category>Application-Targeted</category>
+ <return type="void"/>
+ <parameter type="__tile" varname="tdest"/>
+ <description>Zero the tile specified by "tdest".</description>
+ <operation>nbytes := palette_table[tileconfig.palette_id].bytes_per_row
+FOR i := 0 TO palette_table[tileconfig.palette_id].max_rows-1
+ FOR j := 0 TO nbytes-1
+ tdest.row[i].byte[j] := 0
+ ENDFOR
+ENDFOR
+ </operation>
+ <instruction name="TILEZERO" form="tmm" xed="TILEZERO_TMMu32"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_add_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <parameter type="__m256d" varname="b" etype="FP64"/>
+ <description>Add packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ dst[i+63:i] := a[i+63:i] + b[i+63:i]
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VADDPD" form="ymm, ymm, ymm" xed="VADDPD_YMMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_add_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="__m256" varname="b" etype="FP32"/>
+ <description>Add packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ dst[i+31:i] := a[i+31:i] + b[i+31:i]
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VADDPS" form="ymm, ymm, ymm" xed="VADDPS_YMMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_addsub_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <parameter type="__m256d" varname="b" etype="FP64"/>
+ <description>Alternatively add and subtract packed double-precision (64-bit) floating-point elements in "a" to/from packed elements in "b", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF ((j &amp; 1) == 0)
+ dst[i+63:i] := a[i+63:i] - b[i+63:i]
+ ELSE
+ dst[i+63:i] := a[i+63:i] + b[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VADDSUBPD" form="ymm, ymm, ymm" xed="VADDSUBPD_YMMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_addsub_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="__m256" varname="b" etype="FP32"/>
+ <description>Alternatively add and subtract packed single-precision (32-bit) floating-point elements in "a" to/from packed elements in "b", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF ((j &amp; 1) == 0)
+ dst[i+31:i] := a[i+31:i] - b[i+31:i]
+ ELSE
+ dst[i+31:i] := a[i+31:i] + b[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VADDSUBPS" form="ymm, ymm, ymm" xed="VADDSUBPS_YMMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_and_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Logical</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <parameter type="__m256d" varname="b" etype="FP64"/>
+ <description>Compute the bitwise AND of packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ dst[i+63:i] := (a[i+63:i] AND b[i+63:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VANDPD" form="ymm, ymm, ymm" xed="VANDPD_YMMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_and_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Logical</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="__m256" varname="b" etype="FP32"/>
+ <description>Compute the bitwise AND of packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ dst[i+31:i] := (a[i+31:i] AND b[i+31:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VANDPS" form="ymm, ymm, ymm" xed="VANDPS_YMMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_andnot_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Logical</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <parameter type="__m256d" varname="b" etype="FP64"/>
+ <description>Compute the bitwise NOT of packed double-precision (64-bit) floating-point elements in "a" and then AND with "b", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ dst[i+63:i] := ((NOT a[i+63:i]) AND b[i+63:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VANDNPD" form="ymm, ymm, ymm" xed="VANDNPD_YMMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_andnot_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Logical</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="__m256" varname="b" etype="FP32"/>
+ <description>Compute the bitwise NOT of packed single-precision (32-bit) floating-point elements in "a" and then AND with "b", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ dst[i+31:i] := ((NOT a[i+31:i]) AND b[i+31:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VANDNPS" form="ymm, ymm, ymm" xed="VANDNPS_YMMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_blend_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Swizzle</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <parameter type="__m256d" varname="b" etype="FP64"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="4"/>
+ <description>Blend packed double-precision (64-bit) floating-point elements from "a" and "b" using control mask "imm8", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF imm8[j]
+ dst[i+63:i] := b[i+63:i]
+ ELSE
+ dst[i+63:i] := a[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VBLENDPD" form="ymm, ymm, ymm, imm8" xed="VBLENDPD_YMMqq_YMMqq_YMMqq_IMMb"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_blend_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Swizzle</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="__m256" varname="b" etype="FP32"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Blend packed single-precision (32-bit) floating-point elements from "a" and "b" using control mask "imm8", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF imm8[j]
+ dst[i+31:i] := b[i+31:i]
+ ELSE
+ dst[i+31:i] := a[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VBLENDPS" form="ymm, ymm, ymm, imm8" xed="VBLENDPS_YMMqq_YMMqq_YMMqq_IMMb"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_blendv_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Swizzle</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <parameter type="__m256d" varname="b" etype="FP64"/>
+ <parameter type="__m256d" varname="mask" etype="MASK"/>
+ <description>Blend packed double-precision (64-bit) floating-point elements from "a" and "b" using "mask", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF mask[i+63]
+ dst[i+63:i] := b[i+63:i]
+ ELSE
+ dst[i+63:i] := a[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VBLENDVPD" form="ymm, ymm, ymm, ymm" xed="VBLENDVPD_YMMqq_YMMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_blendv_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Swizzle</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="__m256" varname="b" etype="FP32"/>
+ <parameter type="__m256" varname="mask" etype="MASK"/>
+ <description>Blend packed single-precision (32-bit) floating-point elements from "a" and "b" using "mask", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF mask[i+31]
+ dst[i+31:i] := b[i+31:i]
+ ELSE
+ dst[i+31:i] := a[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VBLENDVPS" form="ymm, ymm, ymm, ymm" xed="VBLENDVPS_YMMqq_YMMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_div_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <parameter type="__m256d" varname="b" etype="FP64"/>
+ <description>Divide packed double-precision (64-bit) floating-point elements in "a" by packed elements in "b", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := 64*j
+ dst[i+63:i] := a[i+63:i] / b[i+63:i]
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VDIVPD" form="ymm, ymm, ymm" xed="VDIVPD_YMMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_div_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="__m256" varname="b" etype="FP32"/>
+ <description>Divide packed single-precision (32-bit) floating-point elements in "a" by packed elements in "b", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := 32*j
+ dst[i+31:i] := a[i+31:i] / b[i+31:i]
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VDIVPS" form="ymm, ymm, ymm" xed="VDIVPS_YMMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_dp_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="__m256" varname="b" etype="FP32"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Conditionally multiply the packed single-precision (32-bit) floating-point elements in "a" and "b" using the high 4 bits in "imm8", sum the four products, and conditionally store the sum in "dst" using the low 4 bits of "imm8".</description>
+ <operation>
+DEFINE DP(a[127:0], b[127:0], imm8[7:0]) {
+ FOR j := 0 to 3
+ i := j*32
+ IF imm8[(4+j)%8]
+ temp[i+31:i] := a[i+31:i] * b[i+31:i]
+ ELSE
+ temp[i+31:i] := FP32(0.0)
+ FI
+ ENDFOR
+
+ sum[31:0] := (temp[127:96] + temp[95:64]) + (temp[63:32] + temp[31:0])
+
+ FOR j := 0 to 3
+ i := j*32
+ IF imm8[j%8]
+ tmpdst[i+31:i] := sum[31:0]
+ ELSE
+ tmpdst[i+31:i] := FP32(0.0)
+ FI
+ ENDFOR
+ RETURN tmpdst[127:0]
+}
+dst[127:0] := DP(a[127:0], b[127:0], imm8[7:0])
+dst[255:128] := DP(a[255:128], b[255:128], imm8[7:0])
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VDPPS" form="ymm, ymm, ymm, imm8" xed="VDPPS_YMMqq_YMMqq_YMMqq_IMMb"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_hadd_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <parameter type="__m256d" varname="b" etype="FP64"/>
+ <description>Horizontally add adjacent pairs of double-precision (64-bit) floating-point elements in "a" and "b", and pack the results in "dst".</description>
+ <operation>
+dst[63:0] := a[127:64] + a[63:0]
+dst[127:64] := b[127:64] + b[63:0]
+dst[191:128] := a[255:192] + a[191:128]
+dst[255:192] := b[255:192] + b[191:128]
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VHADDPD" form="ymm, ymm, ymm" xed="VHADDPD_YMMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_hadd_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="__m256" varname="b" etype="FP32"/>
+ <description>Horizontally add adjacent pairs of single-precision (32-bit) floating-point elements in "a" and "b", and pack the results in "dst".</description>
+ <operation>
+dst[31:0] := a[63:32] + a[31:0]
+dst[63:32] := a[127:96] + a[95:64]
+dst[95:64] := b[63:32] + b[31:0]
+dst[127:96] := b[127:96] + b[95:64]
+dst[159:128] := a[191:160] + a[159:128]
+dst[191:160] := a[255:224] + a[223:192]
+dst[223:192] := b[191:160] + b[159:128]
+dst[255:224] := b[255:224] + b[223:192]
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VHADDPS" form="ymm, ymm, ymm" xed="VHADDPS_YMMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_hsub_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <parameter type="__m256d" varname="b" etype="FP64"/>
+ <description>Horizontally subtract adjacent pairs of double-precision (64-bit) floating-point elements in "a" and "b", and pack the results in "dst".</description>
+ <operation>
+dst[63:0] := a[63:0] - a[127:64]
+dst[127:64] := b[63:0] - b[127:64]
+dst[191:128] := a[191:128] - a[255:192]
+dst[255:192] := b[191:128] - b[255:192]
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VHSUBPD" form="ymm, ymm, ymm" xed="VHSUBPD_YMMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_hsub_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="__m256" varname="b" etype="FP32"/>
+ <description>Horizontally subtract adjacent pairs of single-precision (32-bit) floating-point elements in "a" and "b", and pack the results in "dst".</description>
+ <operation>
+dst[31:0] := a[31:0] - a[63:32]
+dst[63:32] := a[95:64] - a[127:96]
+dst[95:64] := b[31:0] - b[63:32]
+dst[127:96] := b[95:64] - b[127:96]
+dst[159:128] := a[159:128] - a[191:160]
+dst[191:160] := a[223:192] - a[255:224]
+dst[223:192] := b[159:128] - b[191:160]
+dst[255:224] := b[223:192] - b[255:224]
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VHSUBPS" form="ymm, ymm, ymm" xed="VHSUBPS_YMMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_max_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <parameter type="__m256d" varname="b" etype="FP64"/>
+ <description>Compare packed double-precision (64-bit) floating-point elements in "a" and "b", and store packed maximum values in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ dst[i+63:i] := MAX(a[i+63:i], b[i+63:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VMAXPD" form="ymm, ymm, ymm" xed="VMAXPD_YMMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_max_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="__m256" varname="b" etype="FP32"/>
+ <description>Compare packed single-precision (32-bit) floating-point elements in "a" and "b", and store packed maximum values in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ dst[i+31:i] := MAX(a[i+31:i], b[i+31:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VMAXPS" form="ymm, ymm, ymm" xed="VMAXPS_YMMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_min_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <parameter type="__m256d" varname="b" etype="FP64"/>
+ <description>Compare packed double-precision (64-bit) floating-point elements in "a" and "b", and store packed minimum values in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ dst[i+63:i] := MIN(a[i+63:i], b[i+63:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VMINPD" form="ymm, ymm, ymm" xed="VMINPD_YMMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_min_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="__m256" varname="b" etype="FP32"/>
+ <description>Compare packed single-precision (32-bit) floating-point elements in "a" and "b", and store packed minimum values in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ dst[i+31:i] := MIN(a[i+31:i], b[i+31:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VMINPS" form="ymm, ymm, ymm" xed="VMINPS_YMMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_mul_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <parameter type="__m256d" varname="b" etype="FP64"/>
+ <description>Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ dst[i+63:i] := a[i+63:i] * b[i+63:i]
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VMULPD" form="ymm, ymm, ymm" xed="VMULPD_YMMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_mul_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="__m256" varname="b" etype="FP32"/>
+ <description>Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ dst[i+31:i] := a[i+31:i] * b[i+31:i]
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VMULPS" form="ymm, ymm, ymm" xed="VMULPS_YMMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_or_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Logical</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <parameter type="__m256d" varname="b" etype="FP64"/>
+ <description>Compute the bitwise OR of packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ dst[i+63:i] := a[i+63:i] OR b[i+63:i]
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VORPD" form="ymm, ymm, ymm" xed="VORPD_YMMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_or_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Logical</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="__m256" varname="b" etype="FP32"/>
+ <description>Compute the bitwise OR of packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ dst[i+31:i] := a[i+31:i] OR b[i+31:i]
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VORPS" form="ymm, ymm, ymm" xed="VORPS_YMMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_shuffle_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Swizzle</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <parameter type="__m256d" varname="b" etype="FP64"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="4"/>
+ <description>Shuffle double-precision (64-bit) floating-point elements within 128-bit lanes using the control in "imm8", and store the results in "dst".</description>
+ <operation>
+dst[63:0] := (imm8[0] == 0) ? a[63:0] : a[127:64]
+dst[127:64] := (imm8[1] == 0) ? b[63:0] : b[127:64]
+dst[191:128] := (imm8[2] == 0) ? a[191:128] : a[255:192]
+dst[255:192] := (imm8[3] == 0) ? b[191:128] : b[255:192]
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VSHUFPD" form="ymm, ymm, ymm, imm8" xed="VSHUFPD_YMMqq_YMMqq_YMMqq_IMMb"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_shuffle_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Swizzle</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="__m256" varname="b" etype="FP32"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shuffle single-precision (32-bit) floating-point elements in "a" within 128-bit lanes using the control in "imm8", and store the results in "dst".</description>
+ <operation>
+DEFINE SELECT4(src, control) {
+ CASE(control[1:0]) OF
+ 0: tmp[31:0] := src[31:0]
+ 1: tmp[31:0] := src[63:32]
+ 2: tmp[31:0] := src[95:64]
+ 3: tmp[31:0] := src[127:96]
+ ESAC
+ RETURN tmp[31:0]
+}
+dst[31:0] := SELECT4(a[127:0], imm8[1:0])
+dst[63:32] := SELECT4(a[127:0], imm8[3:2])
+dst[95:64] := SELECT4(b[127:0], imm8[5:4])
+dst[127:96] := SELECT4(b[127:0], imm8[7:6])
+dst[159:128] := SELECT4(a[255:128], imm8[1:0])
+dst[191:160] := SELECT4(a[255:128], imm8[3:2])
+dst[223:192] := SELECT4(b[255:128], imm8[5:4])
+dst[255:224] := SELECT4(b[255:128], imm8[7:6])
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VSHUFPS" form="ymm, ymm, ymm, imm8" xed="VSHUFPS_YMMqq_YMMqq_YMMqq_IMMb"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_sub_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <parameter type="__m256d" varname="b" etype="FP64"/>
+ <description>Subtract packed double-precision (64-bit) floating-point elements in "b" from packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ dst[i+63:i] := a[i+63:i] - b[i+63:i]
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VSUBPD" form="ymm, ymm, ymm" xed="VSUBPD_YMMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_sub_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="__m256" varname="b" etype="FP32"/>
+ <description>Subtract packed single-precision (32-bit) floating-point elements in "b" from packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ dst[i+31:i] := a[i+31:i] - b[i+31:i]
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VSUBPS" form="ymm, ymm, ymm" xed="VSUBPS_YMMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_xor_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Logical</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <parameter type="__m256d" varname="b" etype="FP64"/>
+ <description>Compute the bitwise XOR of packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ dst[i+63:i] := a[i+63:i] XOR b[i+63:i]
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VXORPD" form="ymm, ymm, ymm" xed="VXORPD_YMMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_xor_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Logical</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="__m256" varname="b" etype="FP32"/>
+ <description>Compute the bitwise XOR of packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ dst[i+31:i] := a[i+31:i] XOR b[i+31:i]
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VXORPS" form="ymm, ymm, ymm" xed="VXORPS_YMMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm_cmp_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Compare</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="5"/>
+ <description>Compare packed double-precision (64-bit) floating-point elements in "a" and "b" based on the comparison operand specified by "imm8", and store the results in "dst".</description>
+ <operation>CASE (imm8[4:0]) OF
+0: OP := _CMP_EQ_OQ
+1: OP := _CMP_LT_OS
+2: OP := _CMP_LE_OS
+3: OP := _CMP_UNORD_Q
+4: OP := _CMP_NEQ_UQ
+5: OP := _CMP_NLT_US
+6: OP := _CMP_NLE_US
+7: OP := _CMP_ORD_Q
+8: OP := _CMP_EQ_UQ
+9: OP := _CMP_NGE_US
+10: OP := _CMP_NGT_US
+11: OP := _CMP_FALSE_OQ
+12: OP := _CMP_NEQ_OQ
+13: OP := _CMP_GE_OS
+14: OP := _CMP_GT_OS
+15: OP := _CMP_TRUE_UQ
+16: OP := _CMP_EQ_OS
+17: OP := _CMP_LT_OQ
+18: OP := _CMP_LE_OQ
+19: OP := _CMP_UNORD_S
+20: OP := _CMP_NEQ_US
+21: OP := _CMP_NLT_UQ
+22: OP := _CMP_NLE_UQ
+23: OP := _CMP_ORD_S
+24: OP := _CMP_EQ_US
+25: OP := _CMP_NGE_UQ
+26: OP := _CMP_NGT_UQ
+27: OP := _CMP_FALSE_OS
+28: OP := _CMP_NEQ_OS
+29: OP := _CMP_GE_OQ
+30: OP := _CMP_GT_OQ
+31: OP := _CMP_TRUE_US
+ESAC
+FOR j := 0 to 1
+ i := j*64
+ dst[i+63:i] := ( a[i+63:i] OP b[i+63:i] ) ? 0xFFFFFFFFFFFFFFFF : 0
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VCMPPD" form="xmm, xmm, xmm, imm8" xed="VCMPPD_XMMdq_XMMdq_XMMdq_IMMb"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_cmp_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Compare</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <parameter type="__m256d" varname="b" etype="FP64"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="5"/>
+ <description>Compare packed double-precision (64-bit) floating-point elements in "a" and "b" based on the comparison operand specified by "imm8", and store the results in "dst".</description>
+ <operation>CASE (imm8[4:0]) OF
+0: OP := _CMP_EQ_OQ
+1: OP := _CMP_LT_OS
+2: OP := _CMP_LE_OS
+3: OP := _CMP_UNORD_Q
+4: OP := _CMP_NEQ_UQ
+5: OP := _CMP_NLT_US
+6: OP := _CMP_NLE_US
+7: OP := _CMP_ORD_Q
+8: OP := _CMP_EQ_UQ
+9: OP := _CMP_NGE_US
+10: OP := _CMP_NGT_US
+11: OP := _CMP_FALSE_OQ
+12: OP := _CMP_NEQ_OQ
+13: OP := _CMP_GE_OS
+14: OP := _CMP_GT_OS
+15: OP := _CMP_TRUE_UQ
+16: OP := _CMP_EQ_OS
+17: OP := _CMP_LT_OQ
+18: OP := _CMP_LE_OQ
+19: OP := _CMP_UNORD_S
+20: OP := _CMP_NEQ_US
+21: OP := _CMP_NLT_UQ
+22: OP := _CMP_NLE_UQ
+23: OP := _CMP_ORD_S
+24: OP := _CMP_EQ_US
+25: OP := _CMP_NGE_UQ
+26: OP := _CMP_NGT_UQ
+27: OP := _CMP_FALSE_OS
+28: OP := _CMP_NEQ_OS
+29: OP := _CMP_GE_OQ
+30: OP := _CMP_GT_OQ
+31: OP := _CMP_TRUE_US
+ESAC
+FOR j := 0 to 3
+ i := j*64
+ dst[i+63:i] := ( a[i+63:i] OP b[i+63:i] ) ? 0xFFFFFFFFFFFFFFFF : 0
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VCMPPD" form="ymm, ymm, ymm, imm8" xed="VCMPPD_YMMqq_YMMqq_YMMqq_IMMb"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm_cmp_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Compare</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="5"/>
+ <description>Compare packed single-precision (32-bit) floating-point elements in "a" and "b" based on the comparison operand specified by "imm8", and store the results in "dst".</description>
+ <operation>CASE (imm8[4:0]) OF
+0: OP := _CMP_EQ_OQ
+1: OP := _CMP_LT_OS
+2: OP := _CMP_LE_OS
+3: OP := _CMP_UNORD_Q
+4: OP := _CMP_NEQ_UQ
+5: OP := _CMP_NLT_US
+6: OP := _CMP_NLE_US
+7: OP := _CMP_ORD_Q
+8: OP := _CMP_EQ_UQ
+9: OP := _CMP_NGE_US
+10: OP := _CMP_NGT_US
+11: OP := _CMP_FALSE_OQ
+12: OP := _CMP_NEQ_OQ
+13: OP := _CMP_GE_OS
+14: OP := _CMP_GT_OS
+15: OP := _CMP_TRUE_UQ
+16: OP := _CMP_EQ_OS
+17: OP := _CMP_LT_OQ
+18: OP := _CMP_LE_OQ
+19: OP := _CMP_UNORD_S
+20: OP := _CMP_NEQ_US
+21: OP := _CMP_NLT_UQ
+22: OP := _CMP_NLE_UQ
+23: OP := _CMP_ORD_S
+24: OP := _CMP_EQ_US
+25: OP := _CMP_NGE_UQ
+26: OP := _CMP_NGT_UQ
+27: OP := _CMP_FALSE_OS
+28: OP := _CMP_NEQ_OS
+29: OP := _CMP_GE_OQ
+30: OP := _CMP_GT_OQ
+31: OP := _CMP_TRUE_US
+ESAC
+FOR j := 0 to 3
+ i := j*32
+ dst[i+31:i] := ( a[i+31:i] OP b[i+31:i] ) ? 0xFFFFFFFF : 0
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VCMPPS" form="xmm, xmm, xmm, imm8" xed="VCMPPS_XMMdq_XMMdq_XMMdq_IMMb"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_cmp_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Compare</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="__m256" varname="b" etype="FP32"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="5"/>
+ <description>Compare packed single-precision (32-bit) floating-point elements in "a" and "b" based on the comparison operand specified by "imm8", and store the results in "dst".</description>
+ <operation>CASE (imm8[4:0]) OF
+0: OP := _CMP_EQ_OQ
+1: OP := _CMP_LT_OS
+2: OP := _CMP_LE_OS
+3: OP := _CMP_UNORD_Q
+4: OP := _CMP_NEQ_UQ
+5: OP := _CMP_NLT_US
+6: OP := _CMP_NLE_US
+7: OP := _CMP_ORD_Q
+8: OP := _CMP_EQ_UQ
+9: OP := _CMP_NGE_US
+10: OP := _CMP_NGT_US
+11: OP := _CMP_FALSE_OQ
+12: OP := _CMP_NEQ_OQ
+13: OP := _CMP_GE_OS
+14: OP := _CMP_GT_OS
+15: OP := _CMP_TRUE_UQ
+16: OP := _CMP_EQ_OS
+17: OP := _CMP_LT_OQ
+18: OP := _CMP_LE_OQ
+19: OP := _CMP_UNORD_S
+20: OP := _CMP_NEQ_US
+21: OP := _CMP_NLT_UQ
+22: OP := _CMP_NLE_UQ
+23: OP := _CMP_ORD_S
+24: OP := _CMP_EQ_US
+25: OP := _CMP_NGE_UQ
+26: OP := _CMP_NGT_UQ
+27: OP := _CMP_FALSE_OS
+28: OP := _CMP_NEQ_OS
+29: OP := _CMP_GE_OQ
+30: OP := _CMP_GT_OQ
+31: OP := _CMP_TRUE_US
+ESAC
+FOR j := 0 to 7
+ i := j*32
+ dst[i+31:i] := ( a[i+31:i] OP b[i+31:i] ) ? 0xFFFFFFFF : 0
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VCMPPS" form="ymm, ymm, ymm, imm8" xed="VCMPPS_YMMqq_YMMqq_YMMqq_IMMb"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm_cmp_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Compare</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="5"/>
+ <description>Compare the lower double-precision (64-bit) floating-point element in "a" and "b" based on the comparison operand specified by "imm8", store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst".</description>
+ <operation>CASE (imm8[4:0]) OF
+0: OP := _CMP_EQ_OQ
+1: OP := _CMP_LT_OS
+2: OP := _CMP_LE_OS
+3: OP := _CMP_UNORD_Q
+4: OP := _CMP_NEQ_UQ
+5: OP := _CMP_NLT_US
+6: OP := _CMP_NLE_US
+7: OP := _CMP_ORD_Q
+8: OP := _CMP_EQ_UQ
+9: OP := _CMP_NGE_US
+10: OP := _CMP_NGT_US
+11: OP := _CMP_FALSE_OQ
+12: OP := _CMP_NEQ_OQ
+13: OP := _CMP_GE_OS
+14: OP := _CMP_GT_OS
+15: OP := _CMP_TRUE_UQ
+16: OP := _CMP_EQ_OS
+17: OP := _CMP_LT_OQ
+18: OP := _CMP_LE_OQ
+19: OP := _CMP_UNORD_S
+20: OP := _CMP_NEQ_US
+21: OP := _CMP_NLT_UQ
+22: OP := _CMP_NLE_UQ
+23: OP := _CMP_ORD_S
+24: OP := _CMP_EQ_US
+25: OP := _CMP_NGE_UQ
+26: OP := _CMP_NGT_UQ
+27: OP := _CMP_FALSE_OS
+28: OP := _CMP_NEQ_OS
+29: OP := _CMP_GE_OQ
+30: OP := _CMP_GT_OQ
+31: OP := _CMP_TRUE_US
+ESAC
+dst[63:0] := ( a[63:0] OP b[63:0] ) ? 0xFFFFFFFFFFFFFFFF : 0
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VCMPSD" form="xmm, xmm, xmm, imm8" xed="VCMPSD_XMMdq_XMMdq_XMMq_IMMb"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm_cmp_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Compare</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="5"/>
+ <description>Compare the lower single-precision (32-bit) floating-point element in "a" and "b" based on the comparison operand specified by "imm8", store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst".</description>
+ <operation>CASE (imm8[4:0]) OF
+0: OP := _CMP_EQ_OQ
+1: OP := _CMP_LT_OS
+2: OP := _CMP_LE_OS
+3: OP := _CMP_UNORD_Q
+4: OP := _CMP_NEQ_UQ
+5: OP := _CMP_NLT_US
+6: OP := _CMP_NLE_US
+7: OP := _CMP_ORD_Q
+8: OP := _CMP_EQ_UQ
+9: OP := _CMP_NGE_US
+10: OP := _CMP_NGT_US
+11: OP := _CMP_FALSE_OQ
+12: OP := _CMP_NEQ_OQ
+13: OP := _CMP_GE_OS
+14: OP := _CMP_GT_OS
+15: OP := _CMP_TRUE_UQ
+16: OP := _CMP_EQ_OS
+17: OP := _CMP_LT_OQ
+18: OP := _CMP_LE_OQ
+19: OP := _CMP_UNORD_S
+20: OP := _CMP_NEQ_US
+21: OP := _CMP_NLT_UQ
+22: OP := _CMP_NLE_UQ
+23: OP := _CMP_ORD_S
+24: OP := _CMP_EQ_US
+25: OP := _CMP_NGE_UQ
+26: OP := _CMP_NGT_UQ
+27: OP := _CMP_FALSE_OS
+28: OP := _CMP_NEQ_OS
+29: OP := _CMP_GE_OQ
+30: OP := _CMP_GT_OQ
+31: OP := _CMP_TRUE_US
+ESAC
+dst[31:0] := ( a[31:0] OP b[31:0] ) ? 0xFFFFFFFF : 0
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VCMPSS" form="xmm, xmm, xmm, imm8" xed="VCMPSS_XMMdq_XMMdq_XMMd_IMMb"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_cvtepi32_pd">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX</CPUID>
+ <category>Convert</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m128i" varname="a" etype="SI32"/>
+ <description>Convert packed signed 32-bit integers in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ m := j*64
+ dst[m+63:m] := Convert_Int32_To_FP64(a[i+31:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VCVTDQ2PD" form="ymm, xmm" xed="VCVTDQ2PD_YMMqq_XMMdq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_cvtepi32_ps">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX</CPUID>
+ <category>Convert</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256i" varname="a" etype="SI32"/>
+ <description>Convert packed signed 32-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := 32*j
+ dst[i+31:i] := Convert_Int32_To_FP32(a[i+31:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VCVTDQ2PS" form="ymm, ymm" xed="VCVTDQ2PS_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_cvtpd_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Convert</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <description>Convert packed double-precision (64-bit) floating-point elements in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := 32*j
+ k := 64*j
+ dst[i+31:i] := Convert_FP64_To_FP32(a[k+63:k])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VCVTPD2PS" form="xmm, ymm" xed="VCVTPD2PS_XMMdq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_cvtps_epi32">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX</CPUID>
+ <category>Convert</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed 32-bit integers, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := 32*j
+ dst[i+31:i] := Convert_FP32_To_Int32(a[i+31:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VCVTPS2DQ" form="ymm, ymm" xed="VCVTPS2DQ_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_cvtps_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Convert</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := 64*j
+ k := 32*j
+ dst[i+63:i] := Convert_FP32_To_FP64(a[k+31:k])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VCVTPS2PD" form="ymm, xmm" xed="VCVTPS2PD_YMMqq_XMMdq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_cvttpd_epi32">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <description>Convert packed double-precision (64-bit) floating-point elements in "a" to packed 32-bit integers with truncation, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := 32*j
+ k := 64*j
+ dst[i+31:i] := Convert_FP64_To_Int32_Truncate(a[k+63:k])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VCVTTPD2DQ" form="xmm, ymm" xed="VCVTTPD2DQ_XMMdq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_cvtpd_epi32">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <description>Convert packed double-precision (64-bit) floating-point elements in "a" to packed 32-bit integers, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := 32*j
+ k := 64*j
+ dst[i+31:i] := Convert_FP64_To_Int32(a[k+63:k])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VCVTPD2DQ" form="xmm, ymm" xed="VCVTPD2DQ_XMMdq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_cvttps_epi32">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX</CPUID>
+ <category>Convert</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed 32-bit integers with truncation, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := 32*j
+ dst[i+31:i] := Convert_FP32_To_Int32_Truncate(a[i+31:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VCVTTPS2DQ" form="ymm, ymm" xed="VCVTTPS2DQ_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_extractf128_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Swizzle</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="1"/>
+ <description>Extract 128 bits (composed of 4 packed single-precision (32-bit) floating-point elements) from "a", selected with "imm8", and store the result in "dst".</description>
+ <operation>
+CASE imm8[0] OF
+0: dst[127:0] := a[127:0]
+1: dst[127:0] := a[255:128]
+ESAC
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VEXTRACTF128" form="xmm, ymm, imm8" xed="VEXTRACTF128_XMMdq_YMMdq_IMMb"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_extractf128_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Swizzle</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="1"/>
+ <description>Extract 128 bits (composed of 2 packed double-precision (64-bit) floating-point elements) from "a", selected with "imm8", and store the result in "dst".</description>
+ <operation>
+CASE imm8[0] OF
+0: dst[127:0] := a[127:0]
+1: dst[127:0] := a[255:128]
+ESAC
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VEXTRACTF128" form="xmm, ymm, imm8" xed="VEXTRACTF128_XMMdq_YMMdq_IMMb"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_extractf128_si256">
+ <type>Integer</type>
+ <CPUID>AVX</CPUID>
+ <category>Swizzle</category>
+ <return type="__m128i" varname="dst" etype="M128"/>
+ <parameter type="__m256i" varname="a" etype="M128"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="1"/>
+ <description>Extract 128 bits (composed of integer data) from "a", selected with "imm8", and store the result in "dst".</description>
+ <operation>
+CASE imm8[0] OF
+0: dst[127:0] := a[127:0]
+1: dst[127:0] := a[255:128]
+ESAC
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VEXTRACTF128" form="xmm, ymm, imm8" xed="VEXTRACTF128_XMMdq_YMMdq_IMMb"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" sequence="TRUE" name="_mm256_extract_epi32">
+ <type>Integer</type>
+ <CPUID>AVX</CPUID>
+ <category>Swizzle</category>
+ <return type="__int32" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="const int" varname="index" etype="IMM" immwidth="3"/>
+ <description>Extract a 32-bit integer from "a", selected with "index", and store the result in "dst".</description>
+ <operation>
+dst[31:0] := (a[255:0] &gt;&gt; (index[2:0] * 32))[31:0]
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" sequence="TRUE" name="_mm256_extract_epi64">
+ <type>Integer</type>
+ <CPUID>AVX</CPUID>
+ <category>Swizzle</category>
+ <return type="__int64" varname="dst" etype="UI64"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="const int" varname="index" etype="IMM" immwidth="2"/>
+ <description>Extract a 64-bit integer from "a", selected with "index", and store the result in "dst".</description>
+ <operation>
+dst[63:0] := (a[255:0] &gt;&gt; (index[1:0] * 64))[63:0]
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_zeroall">
+ <CPUID>AVX</CPUID>
+ <category>General Support</category>
+ <return type="void"/>
+ <parameter type="void"/>
+ <description>Zero the contents of all XMM or YMM registers.</description>
+ <operation>YMM0[MAX:0] := 0
+YMM1[MAX:0] := 0
+YMM2[MAX:0] := 0
+YMM3[MAX:0] := 0
+YMM4[MAX:0] := 0
+YMM5[MAX:0] := 0
+YMM6[MAX:0] := 0
+YMM7[MAX:0] := 0
+IF _64_BIT_MODE
+ YMM8[MAX:0] := 0
+ YMM9[MAX:0] := 0
+ YMM10[MAX:0] := 0
+ YMM11[MAX:0] := 0
+ YMM12[MAX:0] := 0
+ YMM13[MAX:0] := 0
+ YMM14[MAX:0] := 0
+ YMM15[MAX:0] := 0
+FI
+ </operation>
+ <instruction name="VZEROALL" xed="VZEROALL"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_zeroupper">
+ <CPUID>AVX</CPUID>
+ <category>General Support</category>
+ <return type="void"/>
+ <parameter type="void"/>
+ <description>Zero the upper 128 bits of all YMM registers; the lower 128-bits of the registers are unmodified.</description>
+ <operation>YMM0[MAX:128] := 0
+YMM1[MAX:128] := 0
+YMM2[MAX:128] := 0
+YMM3[MAX:128] := 0
+YMM4[MAX:128] := 0
+YMM5[MAX:128] := 0
+YMM6[MAX:128] := 0
+YMM7[MAX:128] := 0
+IF _64_BIT_MODE
+ YMM8[MAX:128] := 0
+ YMM9[MAX:128] := 0
+ YMM10[MAX:128] := 0
+ YMM11[MAX:128] := 0
+ YMM12[MAX:128] := 0
+ YMM13[MAX:128] := 0
+ YMM14[MAX:128] := 0
+ YMM15[MAX:128] := 0
+FI
+ </operation>
+ <instruction name="VZEROUPPER" xed="VZEROUPPER"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_permutevar_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Swizzle</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="__m256i" varname="b" etype="UI32"/>
+ <description>Shuffle single-precision (32-bit) floating-point elements in "a" within 128-bit lanes using the control in "b", and store the results in "dst".</description>
+ <operation>
+DEFINE SELECT4(src, control) {
+ CASE(control[1:0]) OF
+ 0: tmp[31:0] := src[31:0]
+ 1: tmp[31:0] := src[63:32]
+ 2: tmp[31:0] := src[95:64]
+ 3: tmp[31:0] := src[127:96]
+ ESAC
+ RETURN tmp[31:0]
+}
+dst[31:0] := SELECT4(a[127:0], b[1:0])
+dst[63:32] := SELECT4(a[127:0], b[33:32])
+dst[95:64] := SELECT4(a[127:0], b[65:64])
+dst[127:96] := SELECT4(a[127:0], b[97:96])
+dst[159:128] := SELECT4(a[255:128], b[129:128])
+dst[191:160] := SELECT4(a[255:128], b[161:160])
+dst[223:192] := SELECT4(a[255:128], b[193:192])
+dst[255:224] := SELECT4(a[255:128], b[225:224])
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPERMILPS" form="ymm, ymm, ymm" xed="VPERMILPS_YMMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm_permutevar_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Swizzle</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128i" varname="b" etype="UI32"/>
+ <description>Shuffle single-precision (32-bit) floating-point elements in "a" using the control in "b", and store the results in "dst".</description>
+ <operation>
+DEFINE SELECT4(src, control) {
+ CASE(control[1:0]) OF
+ 0: tmp[31:0] := src[31:0]
+ 1: tmp[31:0] := src[63:32]
+ 2: tmp[31:0] := src[95:64]
+ 3: tmp[31:0] := src[127:96]
+ ESAC
+ RETURN tmp[31:0]
+}
+dst[31:0] := SELECT4(a[127:0], b[1:0])
+dst[63:32] := SELECT4(a[127:0], b[33:32])
+dst[95:64] := SELECT4(a[127:0], b[65:64])
+dst[127:96] := SELECT4(a[127:0], b[97:96])
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPERMILPS" form="xmm, xmm, xmm" xed="VPERMILPS_XMMdq_XMMdq_XMMdq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_permute_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Swizzle</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shuffle single-precision (32-bit) floating-point elements in "a" within 128-bit lanes using the control in "imm8", and store the results in "dst".</description>
+ <operation>
+DEFINE SELECT4(src, control) {
+ CASE(control[1:0]) OF
+ 0: tmp[31:0] := src[31:0]
+ 1: tmp[31:0] := src[63:32]
+ 2: tmp[31:0] := src[95:64]
+ 3: tmp[31:0] := src[127:96]
+ ESAC
+ RETURN tmp[31:0]
+}
+dst[31:0] := SELECT4(a[127:0], imm8[1:0])
+dst[63:32] := SELECT4(a[127:0], imm8[3:2])
+dst[95:64] := SELECT4(a[127:0], imm8[5:4])
+dst[127:96] := SELECT4(a[127:0], imm8[7:6])
+dst[159:128] := SELECT4(a[255:128], imm8[1:0])
+dst[191:160] := SELECT4(a[255:128], imm8[3:2])
+dst[223:192] := SELECT4(a[255:128], imm8[5:4])
+dst[255:224] := SELECT4(a[255:128], imm8[7:6])
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPERMILPS" form="ymm, ymm, imm8" xed="VPERMILPS_YMMqq_YMMqq_IMMb"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm_permute_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Swizzle</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shuffle single-precision (32-bit) floating-point elements in "a" using the control in "imm8", and store the results in "dst".</description>
+ <operation>
+DEFINE SELECT4(src, control) {
+ CASE(control[1:0]) OF
+ 0: tmp[31:0] := src[31:0]
+ 1: tmp[31:0] := src[63:32]
+ 2: tmp[31:0] := src[95:64]
+ 3: tmp[31:0] := src[127:96]
+ ESAC
+ RETURN tmp[31:0]
+}
+dst[31:0] := SELECT4(a[127:0], imm8[1:0])
+dst[63:32] := SELECT4(a[127:0], imm8[3:2])
+dst[95:64] := SELECT4(a[127:0], imm8[5:4])
+dst[127:96] := SELECT4(a[127:0], imm8[7:6])
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPERMILPS" form="xmm, xmm, imm8" xed="VPERMILPS_XMMdq_XMMdq_IMMb"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_permutevar_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Swizzle</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <parameter type="__m256i" varname="b" etype="UI64"/>
+ <description>Shuffle double-precision (64-bit) floating-point elements in "a" within 128-bit lanes using the control in "b", and store the results in "dst".</description>
+ <operation>
+IF (b[1] == 0) dst[63:0] := a[63:0]; FI
+IF (b[1] == 1) dst[63:0] := a[127:64]; FI
+IF (b[65] == 0) dst[127:64] := a[63:0]; FI
+IF (b[65] == 1) dst[127:64] := a[127:64]; FI
+IF (b[129] == 0) dst[191:128] := a[191:128]; FI
+IF (b[129] == 1) dst[191:128] := a[255:192]; FI
+IF (b[193] == 0) dst[255:192] := a[191:128]; FI
+IF (b[193] == 1) dst[255:192] := a[255:192]; FI
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPERMILPD" form="ymm, ymm, ymm" xed="VPERMILPD_YMMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm_permutevar_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Swizzle</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128i" varname="b" etype="UI64"/>
+ <description>Shuffle double-precision (64-bit) floating-point elements in "a" using the control in "b", and store the results in "dst".</description>
+ <operation>
+IF (b[1] == 0) dst[63:0] := a[63:0]; FI
+IF (b[1] == 1) dst[63:0] := a[127:64]; FI
+IF (b[65] == 0) dst[127:64] := a[63:0]; FI
+IF (b[65] == 1) dst[127:64] := a[127:64]; FI
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPERMILPD" form="xmm, xmm, xmm" xed="VPERMILPD_XMMdq_XMMdq_XMMdq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_permute_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Swizzle</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="4"/>
+ <description>Shuffle double-precision (64-bit) floating-point elements in "a" within 128-bit lanes using the control in "imm8", and store the results in "dst".</description>
+ <operation>
+IF (imm8[0] == 0) dst[63:0] := a[63:0]; FI
+IF (imm8[0] == 1) dst[63:0] := a[127:64]; FI
+IF (imm8[1] == 0) dst[127:64] := a[63:0]; FI
+IF (imm8[1] == 1) dst[127:64] := a[127:64]; FI
+IF (imm8[2] == 0) dst[191:128] := a[191:128]; FI
+IF (imm8[2] == 1) dst[191:128] := a[255:192]; FI
+IF (imm8[3] == 0) dst[255:192] := a[191:128]; FI
+IF (imm8[3] == 1) dst[255:192] := a[255:192]; FI
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPERMILPD" form="ymm, ymm, imm8" xed="VPERMILPD_YMMqq_YMMqq_IMMb"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm_permute_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Swizzle</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="2"/>
+ <description>Shuffle double-precision (64-bit) floating-point elements in "a" using the control in "imm8", and store the results in "dst".</description>
+ <operation>
+IF (imm8[0] == 0) dst[63:0] := a[63:0]; FI
+IF (imm8[0] == 1) dst[63:0] := a[127:64]; FI
+IF (imm8[1] == 0) dst[127:64] := a[63:0]; FI
+IF (imm8[1] == 1) dst[127:64] := a[127:64]; FI
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPERMILPD" form="xmm, xmm, imm8" xed="VPERMILPD_XMMdq_XMMdq_IMMb"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_permute2f128_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Swizzle</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="__m256" varname="b" etype="FP32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shuffle 128-bits (composed of 4 packed single-precision (32-bit) floating-point elements) selected by "imm8" from "a" and "b", and store the results in "dst".</description>
+ <operation>
+DEFINE SELECT4(src1, src2, control) {
+ CASE(control[1:0]) OF
+ 0: tmp[127:0] := src1[127:0]
+ 1: tmp[127:0] := src1[255:128]
+ 2: tmp[127:0] := src2[127:0]
+ 3: tmp[127:0] := src2[255:128]
+ ESAC
+ IF control[3]
+ tmp[127:0] := 0
+ FI
+ RETURN tmp[127:0]
+}
+dst[127:0] := SELECT4(a[255:0], b[255:0], imm8[3:0])
+dst[255:128] := SELECT4(a[255:0], b[255:0], imm8[7:4])
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPERM2F128" form="ymm, ymm, ymm, imm8" xed="VPERM2F128_YMMqq_YMMqq_YMMqq_IMMb"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_permute2f128_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Swizzle</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <parameter type="__m256d" varname="b" etype="FP64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shuffle 128-bits (composed of 2 packed double-precision (64-bit) floating-point elements) selected by "imm8" from "a" and "b", and store the results in "dst".</description>
+ <operation>
+DEFINE SELECT4(src1, src2, control) {
+ CASE(control[1:0]) OF
+ 0: tmp[127:0] := src1[127:0]
+ 1: tmp[127:0] := src1[255:128]
+ 2: tmp[127:0] := src2[127:0]
+ 3: tmp[127:0] := src2[255:128]
+ ESAC
+ IF control[3]
+ tmp[127:0] := 0
+ FI
+ RETURN tmp[127:0]
+}
+dst[127:0] := SELECT4(a[255:0], b[255:0], imm8[3:0])
+dst[255:128] := SELECT4(a[255:0], b[255:0], imm8[7:4])
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPERM2F128" form="ymm, ymm, ymm, imm8" xed="VPERM2F128_YMMqq_YMMqq_YMMqq_IMMb"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_permute2f128_si256">
+ <type>Integer</type>
+ <CPUID>AVX</CPUID>
+ <category>Swizzle</category>
+ <return type="__m256i" varname="dst" etype="M256"/>
+ <parameter type="__m256i" varname="a" etype="M256"/>
+ <parameter type="__m256i" varname="b" etype="M256"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shuffle 128-bits (composed of integer data) selected by "imm8" from "a" and "b", and store the results in "dst".</description>
+ <operation>
+DEFINE SELECT4(src1, src2, control) {
+ CASE(control[1:0]) OF
+ 0: tmp[127:0] := src1[127:0]
+ 1: tmp[127:0] := src1[255:128]
+ 2: tmp[127:0] := src2[127:0]
+ 3: tmp[127:0] := src2[255:128]
+ ESAC
+ IF control[3]
+ tmp[127:0] := 0
+ FI
+ RETURN tmp[127:0]
+}
+dst[127:0] := SELECT4(a[255:0], b[255:0], imm8[3:0])
+dst[255:128] := SELECT4(a[255:0], b[255:0], imm8[7:4])
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPERM2F128" form="ymm, ymm, ymm, imm8" xed="VPERM2F128_YMMqq_YMMqq_YMMqq_IMMb"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_broadcast_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Load</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="float const *" varname="mem_addr" etype="FP32" memwidth="32"/>
+ <description>Broadcast a single-precision (32-bit) floating-point element from memory to all elements of "dst".</description>
+ <operation>
+tmp[31:0] := MEM[mem_addr+31:mem_addr]
+FOR j := 0 to 7
+ i := j*32
+ dst[i+31:i] := tmp[31:0]
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VBROADCASTSS" form="ymm, m32" xed="VBROADCASTSS_YMMqq_MEMd"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm_broadcast_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Load</category>
+ <category>Swizzle</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="float const *" varname="mem_addr" etype="FP32" memwidth="32"/>
+ <description>Broadcast a single-precision (32-bit) floating-point element from memory to all elements of "dst".</description>
+ <operation>
+tmp[31:0] := MEM[mem_addr+31:mem_addr]
+FOR j := 0 to 3
+ i := j*32
+ dst[i+31:i] := tmp[31:0]
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VBROADCASTSS" form="xmm, m32" xed="VBROADCASTSS_XMMdq_MEMd"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_broadcast_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Load</category>
+ <category>Swizzle</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="double const *" varname="mem_addr" etype="FP64" memwidth="64"/>
+ <description>Broadcast a double-precision (64-bit) floating-point element from memory to all elements of "dst".</description>
+ <operation>
+tmp[63:0] := MEM[mem_addr+63:mem_addr]
+FOR j := 0 to 3
+ i := j*64
+ dst[i+63:i] := tmp[63:0]
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VBROADCASTSD" form="ymm, m64" xed="VBROADCASTSD_YMMqq_MEMq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_broadcast_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Load</category>
+ <category>Swizzle</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m128 const *" varname="mem_addr" etype="FP32" memwidth="128"/>
+ <description>Broadcast 128 bits from memory (composed of 4 packed single-precision (32-bit) floating-point elements) to all elements of "dst".</description>
+ <operation>
+tmp[127:0] := MEM[mem_addr+127:mem_addr]
+dst[127:0] := tmp[127:0]
+dst[255:128] := tmp[127:0]
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VBROADCASTF128" form="ymm, m128" xed="VBROADCASTF128_YMMqq_MEMdq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_broadcast_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Load</category>
+ <category>Swizzle</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d const *" varname="mem_addr" etype="FP64" memwidth="128"/>
+ <description>Broadcast 128 bits from memory (composed of 2 packed double-precision (64-bit) floating-point elements) to all elements of "dst".</description>
+ <operation>
+tmp[127:0] := MEM[mem_addr+127:mem_addr]
+dst[127:0] := tmp[127:0]
+dst[255:128] := tmp[127:0]
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VBROADCASTF128" form="ymm, m128" xed="VBROADCASTF128_YMMqq_MEMdq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_insertf128_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Swizzle</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="1"/>
+ <description>Copy "a" to "dst", then insert 128 bits (composed of 4 packed single-precision (32-bit) floating-point elements) from "b" into "dst" at the location specified by "imm8".</description>
+ <operation>
+dst[255:0] := a[255:0]
+CASE (imm8[0]) OF
+0: dst[127:0] := b[127:0]
+1: dst[255:128] := b[127:0]
+ESAC
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VINSERTF128" form="ymm, ymm, xmm, imm8" xed="VINSERTF128_YMMqq_YMMqq_XMMdq_IMMb"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_insertf128_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Swizzle</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="1"/>
+ <description>Copy "a" to "dst", then insert 128 bits (composed of 2 packed double-precision (64-bit) floating-point elements) from "b" into "dst" at the location specified by "imm8".</description>
+ <operation>
+dst[255:0] := a[255:0]
+CASE imm8[0] OF
+0: dst[127:0] := b[127:0]
+1: dst[255:128] := b[127:0]
+ESAC
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VINSERTF128" form="ymm, ymm, xmm, imm8" xed="VINSERTF128_YMMqq_YMMqq_XMMdq_IMMb"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_insertf128_si256">
+ <type>Integer</type>
+ <CPUID>AVX</CPUID>
+ <category>Swizzle</category>
+ <return type="__m256i" varname="dst" etype="M256"/>
+ <parameter type="__m256i" varname="a" etype="M256"/>
+ <parameter type="__m128i" varname="b" etype="M128"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="1"/>
+ <description>Copy "a" to "dst", then insert 128 bits from "b" into "dst" at the location specified by "imm8".</description>
+ <operation>
+dst[255:0] := a[255:0]
+CASE (imm8[0]) OF
+0: dst[127:0] := b[127:0]
+1: dst[255:128] := b[127:0]
+ESAC
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VINSERTF128" form="ymm, ymm, xmm, imm8" xed="VINSERTF128_YMMqq_YMMqq_XMMdq_IMMb"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" sequence="TRUE" name="_mm256_insert_epi8">
+ <type>Integer</type>
+ <CPUID>AVX</CPUID>
+ <category>Swizzle</category>
+ <return type="__m256i" varname="dst" etype="UI8"/>
+ <parameter type="__m256i" varname="a" etype="UI8"/>
+ <parameter type="__int8" varname="i" etype="UI8"/>
+ <parameter type="const int" varname="index" etype="IMM" immwidth="5"/>
+ <description>Copy "a" to "dst", and insert the 8-bit integer "i" into "dst" at the location specified by "index".</description>
+ <operation>
+dst[255:0] := a[255:0]
+sel := index[4:0]*8
+dst[sel+7:sel] := i[7:0]
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" sequence="TRUE" name="_mm256_insert_epi16">
+ <type>Integer</type>
+ <CPUID>AVX</CPUID>
+ <category>Swizzle</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__m256i" varname="a" etype="UI16"/>
+ <parameter type="__int16" varname="i" etype="UI16"/>
+ <parameter type="const int" varname="index" etype="IMM" immwidth="4"/>
+ <description>Copy "a" to "dst", and insert the 16-bit integer "i" into "dst" at the location specified by "index".</description>
+ <operation>
+dst[255:0] := a[255:0]
+sel := index[3:0]*16
+dst[sel+15:sel] := i[15:0]
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" sequence="TRUE" name="_mm256_insert_epi32">
+ <type>Integer</type>
+ <CPUID>AVX</CPUID>
+ <category>Swizzle</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="__int32" varname="i" etype="UI32"/>
+ <parameter type="const int" varname="index" etype="IMM" immwidth="3"/>
+ <description>Copy "a" to "dst", and insert the 32-bit integer "i" into "dst" at the location specified by "index".</description>
+ <operation>
+dst[255:0] := a[255:0]
+sel := index[2:0]*32
+dst[sel+31:sel] := i[31:0]
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" sequence="TRUE" name="_mm256_insert_epi64">
+ <type>Integer</type>
+ <CPUID>AVX</CPUID>
+ <category>Swizzle</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="__int64" varname="i" etype="UI64"/>
+ <parameter type="const int" varname="index" etype="IMM" immwidth="2"/>
+ <description>Copy "a" to "dst", and insert the 64-bit integer "i" into "dst" at the location specified by "index".</description>
+ <operation>
+dst[255:0] := a[255:0]
+sel := index[1:0]*64
+dst[sel+63:sel] := i[63:0]
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_load_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Load</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="double const *" varname="mem_addr" etype="FP64" memwidth="256"/>
+ <description>Load 256-bits (composed of 4 packed double-precision (64-bit) floating-point elements) from memory into "dst".
+ "mem_addr" must be aligned on a 32-byte boundary or a general-protection exception may be generated.</description>
+ <operation>
+dst[255:0] := MEM[mem_addr+255:mem_addr]
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VMOVAPD" form="ymm, m256" xed="VMOVAPD_YMMqq_MEMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_store_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="double *" varname="mem_addr" etype="FP64" memwidth="256"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <description>Store 256-bits (composed of 4 packed double-precision (64-bit) floating-point elements) from "a" into memory.
+ "mem_addr" must be aligned on a 32-byte boundary or a general-protection exception may be generated.</description>
+ <operation>
+MEM[mem_addr+255:mem_addr] := a[255:0]
+ </operation>
+ <instruction name="VMOVAPD" form="m256, ymm" xed="VMOVAPD_MEMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_load_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Load</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="float const *" varname="mem_addr" etype="FP32" memwidth="256"/>
+ <description>Load 256-bits (composed of 8 packed single-precision (32-bit) floating-point elements) from memory into "dst".
+ "mem_addr" must be aligned on a 32-byte boundary or a general-protection exception may be generated.</description>
+ <operation>
+dst[255:0] := MEM[mem_addr+255:mem_addr]
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VMOVAPS" form="ymm, m256" xed="VMOVAPS_YMMqq_MEMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_store_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="float *" varname="mem_addr" etype="FP32" memwidth="256"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <description>Store 256-bits (composed of 8 packed single-precision (32-bit) floating-point elements) from "a" into memory.
+ "mem_addr" must be aligned on a 32-byte boundary or a general-protection exception may be generated.</description>
+ <operation>
+MEM[mem_addr+255:mem_addr] := a[255:0]
+ </operation>
+ <instruction name="VMOVAPS" form="m256, ymm" xed="VMOVAPS_MEMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_loadu_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Load</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="double const *" varname="mem_addr" etype="FP64" memwidth="256"/>
+ <description>Load 256-bits (composed of 4 packed double-precision (64-bit) floating-point elements) from memory into "dst".
+ "mem_addr" does not need to be aligned on any particular boundary.</description>
+ <operation>
+dst[255:0] := MEM[mem_addr+255:mem_addr]
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VMOVUPD" form="ymm, m256" xed="VMOVUPD_YMMqq_MEMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_storeu_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="double *" varname="mem_addr" etype="FP64" memwidth="256"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <description>Store 256-bits (composed of 4 packed double-precision (64-bit) floating-point elements) from "a" into memory.
+ "mem_addr" does not need to be aligned on any particular boundary.</description>
+ <operation>
+MEM[mem_addr+255:mem_addr] := a[255:0]
+ </operation>
+ <instruction name="VMOVUPD" form="m256, ymm" xed="VMOVUPD_MEMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_loadu_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Load</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="float const *" varname="mem_addr" etype="FP32" memwidth="256"/>
+ <description>Load 256-bits (composed of 8 packed single-precision (32-bit) floating-point elements) from memory into "dst".
+ "mem_addr" does not need to be aligned on any particular boundary.</description>
+ <operation>
+dst[255:0] := MEM[mem_addr+255:mem_addr]
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VMOVUPS" form="ymm, m256" xed="VMOVUPS_YMMqq_MEMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_storeu_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="float *" varname="mem_addr" etype="FP32" memwidth="256"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <description>Store 256-bits (composed of 8 packed single-precision (32-bit) floating-point elements) from "a" into memory.
+ "mem_addr" does not need to be aligned on any particular boundary.</description>
+ <operation>
+MEM[mem_addr+255:mem_addr] := a[255:0]
+ </operation>
+ <instruction name="VMOVUPS" form="m256, ymm" xed="VMOVUPS_MEMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_load_si256">
+ <type>Integer</type>
+ <CPUID>AVX</CPUID>
+ <category>Load</category>
+ <return type="__m256i" varname="dst" etype="M256"/>
+ <parameter type="__m256i const *" varname="mem_addr" etype="M256" memwidth="256"/>
+ <description>Load 256-bits of integer data from memory into "dst".
+ "mem_addr" must be aligned on a 32-byte boundary or a general-protection exception may be generated.</description>
+ <operation>
+dst[255:0] := MEM[mem_addr+255:mem_addr]
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VMOVDQA" form="ymm, m256" xed="VMOVDQA_YMMqq_MEMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_store_si256">
+ <type>Integer</type>
+ <CPUID>AVX</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="__m256i *" varname="mem_addr" etype="M256" memwidth="256"/>
+ <parameter type="__m256i" varname="a" etype="M256"/>
+ <description>Store 256-bits of integer data from "a" into memory.
+ "mem_addr" must be aligned on a 32-byte boundary or a general-protection exception may be generated.</description>
+ <operation>
+MEM[mem_addr+255:mem_addr] := a[255:0]
+ </operation>
+ <instruction name="VMOVDQA" form="m256, ymm" xed="VMOVDQA_MEMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_loadu_si256">
+ <type>Integer</type>
+ <CPUID>AVX</CPUID>
+ <category>Load</category>
+ <return type="__m256i" varname="dst" etype="M256"/>
+ <parameter type="__m256i const *" varname="mem_addr" etype="M256" memwidth="256"/>
+ <description>Load 256-bits of integer data from memory into "dst".
+ "mem_addr" does not need to be aligned on any particular boundary.</description>
+ <operation>
+dst[255:0] := MEM[mem_addr+255:mem_addr]
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VMOVDQU" form="ymm, m256" xed="VMOVDQU_YMMqq_MEMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_storeu_si256">
+ <type>Integer</type>
+ <CPUID>AVX</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="__m256i *" varname="mem_addr" etype="M256" memwidth="256"/>
+ <parameter type="__m256i" varname="a" etype="M256"/>
+ <description>Store 256-bits of integer data from "a" into memory.
+ "mem_addr" does not need to be aligned on any particular boundary.</description>
+ <operation>
+MEM[mem_addr+255:mem_addr] := a[255:0]
+ </operation>
+ <instruction name="VMOVDQU" form="m256, ymm" xed="VMOVDQU_MEMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_maskload_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Load</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="double const *" varname="mem_addr" etype="FP64" memwidth="256"/>
+ <parameter type="__m256i" varname="mask" etype="MASK"/>
+ <description>Load packed double-precision (64-bit) floating-point elements from memory into "dst" using "mask" (elements are zeroed out when the high bit of the corresponding element is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF mask[i+63]
+ dst[i+63:i] := MEM[mem_addr+i+63:mem_addr+i]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VMASKMOVPD" form="ymm, ymm, m256" xed="VMASKMOVPD_YMMqq_YMMqq_MEMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_maskstore_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="double *" varname="mem_addr" etype="FP64" memwidth="256"/>
+ <parameter type="__m256i" varname="mask" etype="MASK"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <description>Store packed double-precision (64-bit) floating-point elements from "a" into memory using "mask".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF mask[i+63]
+ MEM[mem_addr+i+63:mem_addr+i] := a[i+63:i]
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VMASKMOVPD" form="m256, ymm, ymm" xed="VMASKMOVPD_MEMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm_maskload_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Load</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="double const *" varname="mem_addr" etype="FP64" memwidth="128"/>
+ <parameter type="__m128i" varname="mask" etype="MASK"/>
+ <description>Load packed double-precision (64-bit) floating-point elements from memory into "dst" using "mask" (elements are zeroed out when the high bit of the corresponding element is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF mask[i+63]
+ dst[i+63:i] := MEM[mem_addr+i+63:mem_addr+i]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VMASKMOVPD" form="xmm, xmm, m128" xed="VMASKMOVPD_XMMdq_XMMdq_MEMdq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm_maskstore_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="double *" varname="mem_addr" etype="FP64" memwidth="128"/>
+ <parameter type="__m128i" varname="mask" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Store packed double-precision (64-bit) floating-point elements from "a" into memory using "mask".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF mask[i+63]
+ MEM[mem_addr+i+63:mem_addr+i] := a[i+63:i]
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VMASKMOVPD" form="m128, xmm, xmm" xed="VMASKMOVPD_MEMdq_XMMdq_XMMdq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_maskload_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Load</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="float const *" varname="mem_addr" etype="FP32" memwidth="256"/>
+ <parameter type="__m256i" varname="mask" etype="MASK"/>
+ <description>Load packed single-precision (32-bit) floating-point elements from memory into "dst" using "mask" (elements are zeroed out when the high bit of the corresponding element is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF mask[i+31]
+ dst[i+31:i] := MEM[mem_addr+i+31:mem_addr+i]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VMASKMOVPS" form="ymm, ymm, m256" xed="VMASKMOVPS_YMMqq_YMMqq_MEMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_maskstore_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="float *" varname="mem_addr" etype="FP32" memwidth="256"/>
+ <parameter type="__m256i" varname="mask" etype="MASK"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <description>Store packed single-precision (32-bit) floating-point elements from "a" into memory using "mask".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF mask[i+31]
+ MEM[mem_addr+i+31:mem_addr+i] := a[i+31:i]
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VMASKMOVPS" form="m256, ymm, ymm" xed="VMASKMOVPS_MEMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm_maskload_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Load</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="float const *" varname="mem_addr" etype="FP32" memwidth="128"/>
+ <parameter type="__m128i" varname="mask" etype="MASK"/>
+ <description>Load packed single-precision (32-bit) floating-point elements from memory into "dst" using "mask" (elements are zeroed out when the high bit of the corresponding element is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF mask[i+31]
+ dst[i+31:i] := MEM[mem_addr+i+31:mem_addr+i]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VMASKMOVPS" form="xmm, xmm, m128" xed="VMASKMOVPS_XMMdq_XMMdq_MEMdq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm_maskstore_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="float *" varname="mem_addr" etype="FP32" memwidth="128"/>
+ <parameter type="__m128i" varname="mask" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Store packed single-precision (32-bit) floating-point elements from "a" into memory using "mask".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF mask[i+31]
+ MEM[mem_addr+i+31:mem_addr+i] := a[i+31:i]
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VMASKMOVPS" form="m128, xmm, xmm" xed="VMASKMOVPS_MEMdq_XMMdq_XMMdq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_movehdup_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Move</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <description>Duplicate odd-indexed single-precision (32-bit) floating-point elements from "a", and store the results in "dst".</description>
+ <operation>
+dst[31:0] := a[63:32]
+dst[63:32] := a[63:32]
+dst[95:64] := a[127:96]
+dst[127:96] := a[127:96]
+dst[159:128] := a[191:160]
+dst[191:160] := a[191:160]
+dst[223:192] := a[255:224]
+dst[255:224] := a[255:224]
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VMOVSHDUP" form="ymm, ymm" xed="VMOVSHDUP_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_moveldup_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Move</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <description>Duplicate even-indexed single-precision (32-bit) floating-point elements from "a", and store the results in "dst".</description>
+ <operation>
+dst[31:0] := a[31:0]
+dst[63:32] := a[31:0]
+dst[95:64] := a[95:64]
+dst[127:96] := a[95:64]
+dst[159:128] := a[159:128]
+dst[191:160] := a[159:128]
+dst[223:192] := a[223:192]
+dst[255:224] := a[223:192]
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VMOVSLDUP" form="ymm, ymm" xed="VMOVSLDUP_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_movedup_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Move</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <description>Duplicate even-indexed double-precision (64-bit) floating-point elements from "a", and store the results in "dst".</description>
+ <operation>
+dst[63:0] := a[63:0]
+dst[127:64] := a[63:0]
+dst[191:128] := a[191:128]
+dst[255:192] := a[191:128]
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VMOVDDUP" form="ymm, ymm" xed="VMOVDDUP_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_lddqu_si256">
+ <type>Integer</type>
+ <CPUID>AVX</CPUID>
+ <category>Load</category>
+ <return type="__m256i" varname="dst" etype="M256"/>
+ <parameter type="__m256i const *" varname="mem_addr" etype="M256" memwidth="256"/>
+ <description>Load 256-bits of integer data from unaligned memory into "dst". This intrinsic may perform better than "_mm256_loadu_si256" when the data crosses a cache line boundary.</description>
+ <operation>
+dst[255:0] := MEM[mem_addr+255:mem_addr]
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VLDDQU" form="ymm, m256" xed="VLDDQU_YMMqq_MEMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_stream_si256">
+ <type>Integer</type>
+ <CPUID>AVX</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="__m256i *" varname="mem_addr" etype="M256" memwidth="256"/>
+ <parameter type="__m256i" varname="a" etype="M256"/>
+ <description>Store 256-bits of integer data from "a" into memory using a non-temporal memory hint.
+ "mem_addr" must be aligned on a 32-byte boundary or a general-protection exception may be generated.</description>
+ <operation>
+MEM[mem_addr+255:mem_addr] := a[255:0]
+ </operation>
+ <instruction name="VMOVNTDQ" form="m256, ymm" xed="VMOVNTDQ_MEMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_stream_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="double *" varname="mem_addr" etype="FP64" memwidth="256"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <description>Store 256-bits (composed of 4 packed double-precision (64-bit) floating-point elements) from "a" into memory using a non-temporal memory hint.
+ "mem_addr" must be aligned on a 32-byte boundary or a general-protection exception may be generated.</description>
+ <operation>
+MEM[mem_addr+255:mem_addr] := a[255:0]
+ </operation>
+ <instruction name="VMOVNTPD" form="m256, ymm" xed="VMOVNTPD_MEMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_stream_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="float *" varname="mem_addr" etype="FP32" memwidth="256"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <description>Store 256-bits (composed of 8 packed single-precision (32-bit) floating-point elements) from "a" into memory using a non-temporal memory hint.
+ "mem_addr" must be aligned on a 32-byte boundary or a general-protection exception may be generated.</description>
+ <operation>
+MEM[mem_addr+255:mem_addr] := a[255:0]
+ </operation>
+ <instruction name="VMOVNTPS" form="m256, ymm" xed="VMOVNTPS_MEMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_rcp_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <description>Compute the approximate reciprocal of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". The maximum relative error for this approximation is less than 1.5*2^-12.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ dst[i+31:i] := 1.0 / a[i+31:i]
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VRCPPS" form="ymm, ymm" xed="VRCPPS_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_rsqrt_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <description>Compute the approximate reciprocal square root of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". The maximum relative error for this approximation is less than 1.5*2^-12.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ dst[i+31:i] := (1.0 / SQRT(a[i+31:i]))
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VRSQRTPS" form="ymm, ymm" xed="VRSQRTPS_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_sqrt_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <description>Compute the square root of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ dst[i+63:i] := SQRT(a[i+63:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VSQRTPD" form="ymm, ymm" xed="VSQRTPD_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_sqrt_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <description>Compute the square root of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ dst[i+31:i] := SQRT(a[i+31:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VSQRTPS" form="ymm, ymm" xed="VSQRTPS_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_round_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <parameter type="int" varname="rounding" etype="IMM" immtype="_MM_FROUND"/>
+ <description>Round the packed double-precision (64-bit) floating-point elements in "a" using the "rounding" parameter, and store the results as packed double-precision floating-point elements in "dst".
+ [round_note]</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ dst[i+63:i] := ROUND(a[i+63:i], rounding)
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VROUNDPD" form="ymm, ymm, imm8" xed="VROUNDPD_YMMqq_YMMqq_IMMb"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_round_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="int" varname="rounding" etype="IMM" immtype="_MM_FROUND"/>
+ <description>Round the packed single-precision (32-bit) floating-point elements in "a" using the "rounding" parameter, and store the results as packed single-precision floating-point elements in "dst".
+ [round_note]</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ dst[i+31:i] := ROUND(a[i+31:i], rounding)
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VROUNDPS" form="ymm, ymm, imm8" xed="VROUNDPS_YMMqq_YMMqq_IMMb"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_unpackhi_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Swizzle</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <parameter type="__m256d" varname="b" etype="FP64"/>
+ <description>Unpack and interleave double-precision (64-bit) floating-point elements from the high half of each 128-bit lane in "a" and "b", and store the results in "dst".</description>
+ <operation>
+DEFINE INTERLEAVE_HIGH_QWORDS(src1[127:0], src2[127:0]) {
+ dst[63:0] := src1[127:64]
+ dst[127:64] := src2[127:64]
+ RETURN dst[127:0]
+}
+dst[127:0] := INTERLEAVE_HIGH_QWORDS(a[127:0], b[127:0])
+dst[255:128] := INTERLEAVE_HIGH_QWORDS(a[255:128], b[255:128])
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VUNPCKHPD" form="ymm, ymm, ymm" xed="VUNPCKHPD_YMMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_unpackhi_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Swizzle</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="__m256" varname="b" etype="FP32"/>
+ <description>Unpack and interleave single-precision (32-bit) floating-point elements from the high half of each 128-bit lane in "a" and "b", and store the results in "dst".</description>
+ <operation>
+DEFINE INTERLEAVE_HIGH_DWORDS(src1[127:0], src2[127:0]) {
+ dst[31:0] := src1[95:64]
+ dst[63:32] := src2[95:64]
+ dst[95:64] := src1[127:96]
+ dst[127:96] := src2[127:96]
+ RETURN dst[127:0]
+}
+dst[127:0] := INTERLEAVE_HIGH_DWORDS(a[127:0], b[127:0])
+dst[255:128] := INTERLEAVE_HIGH_DWORDS(a[255:128], b[255:128])
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VUNPCKHPS" form="ymm, ymm, ymm" xed="VUNPCKHPS_YMMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_unpacklo_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Swizzle</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <parameter type="__m256d" varname="b" etype="FP64"/>
+ <description>Unpack and interleave double-precision (64-bit) floating-point elements from the low half of each 128-bit lane in "a" and "b", and store the results in "dst".</description>
+ <operation>
+DEFINE INTERLEAVE_QWORDS(src1[127:0], src2[127:0]) {
+ dst[63:0] := src1[63:0]
+ dst[127:64] := src2[63:0]
+ RETURN dst[127:0]
+}
+dst[127:0] := INTERLEAVE_QWORDS(a[127:0], b[127:0])
+dst[255:128] := INTERLEAVE_QWORDS(a[255:128], b[255:128])
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VUNPCKLPD" form="ymm, ymm, ymm" xed="VUNPCKLPD_YMMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_unpacklo_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Swizzle</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="__m256" varname="b" etype="FP32"/>
+ <description>Unpack and interleave single-precision (32-bit) floating-point elements from the low half of each 128-bit lane in "a" and "b", and store the results in "dst".</description>
+ <operation>
+DEFINE INTERLEAVE_DWORDS(src1[127:0], src2[127:0]) {
+ dst[31:0] := src1[31:0]
+ dst[63:32] := src2[31:0]
+ dst[95:64] := src1[63:32]
+ dst[127:96] := src2[63:32]
+ RETURN dst[127:0]
+}
+dst[127:0] := INTERLEAVE_DWORDS(a[127:0], b[127:0])
+dst[255:128] := INTERLEAVE_DWORDS(a[255:128], b[255:128])
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VUNPCKLPS" form="ymm, ymm, ymm" xed="VUNPCKLPS_YMMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_testz_si256">
+ <type>Integer</type>
+ <type>Flag</type>
+ <CPUID>AVX</CPUID>
+ <category>Logical</category>
+ <return type="int" varname="k" etype="UI32"/>
+ <parameter type="__m256i" varname="a" etype="M256"/>
+ <parameter type="__m256i" varname="b" etype="M256"/>
+ <description>Compute the bitwise AND of 256 bits (representing integer data) in "a" and "b", and set "ZF" to 1 if the result is zero, otherwise set "ZF" to 0. Compute the bitwise NOT of "a" and then AND with "b", and set "CF" to 1 if the result is zero, otherwise set "CF" to 0. Return the "ZF" value.</description>
+ <operation>
+IF ((a[255:0] AND b[255:0]) == 0)
+ ZF := 1
+ELSE
+ ZF := 0
+FI
+IF (((NOT a[255:0]) AND b[255:0]) == 0)
+ CF := 1
+ELSE
+ CF := 0
+FI
+RETURN ZF
+ </operation>
+ <instruction name="VPTEST" form="ymm, ymm" xed="VPTEST_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_testc_si256">
+ <type>Integer</type>
+ <type>Flag</type>
+ <CPUID>AVX</CPUID>
+ <category>Logical</category>
+ <return type="int" varname="k" etype="UI32"/>
+ <parameter type="__m256i" varname="a" etype="M256"/>
+ <parameter type="__m256i" varname="b" etype="M256"/>
+ <description>Compute the bitwise AND of 256 bits (representing integer data) in "a" and "b", and set "ZF" to 1 if the result is zero, otherwise set "ZF" to 0. Compute the bitwise NOT of "a" and then AND with "b", and set "CF" to 1 if the result is zero, otherwise set "CF" to 0. Return the "CF" value.</description>
+ <operation>
+IF ((a[255:0] AND b[255:0]) == 0)
+ ZF := 1
+ELSE
+ ZF := 0
+FI
+IF (((NOT a[255:0]) AND b[255:0]) == 0)
+ CF := 1
+ELSE
+ CF := 0
+FI
+RETURN CF
+ </operation>
+ <instruction name="VPTEST" form="ymm, ymm" xed="VPTEST_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_testnzc_si256">
+ <type>Integer</type>
+ <type>Flag</type>
+ <CPUID>AVX</CPUID>
+ <category>Logical</category>
+ <return type="int" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="a" etype="M256"/>
+ <parameter type="__m256i" varname="b" etype="M256"/>
+ <description>Compute the bitwise AND of 256 bits (representing integer data) in "a" and "b", and set "ZF" to 1 if the result is zero, otherwise set "ZF" to 0. Compute the bitwise NOT of "a" and then AND with "b", and set "CF" to 1 if the result is zero, otherwise set "CF" to 0. Return 1 if both the "ZF" and "CF" values are zero, otherwise return 0.</description>
+ <operation>
+IF ((a[255:0] AND b[255:0]) == 0)
+ ZF := 1
+ELSE
+ ZF := 0
+FI
+IF (((NOT a[255:0]) AND b[255:0]) == 0)
+ CF := 1
+ELSE
+ CF := 0
+FI
+IF (ZF == 0 &amp;&amp; CF == 0)
+ dst := 1
+ELSE
+ dst := 0
+FI
+ </operation>
+ <instruction name="VPTEST" form="ymm, ymm" xed="VPTEST_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_testz_pd">
+ <type>Floating Point</type>
+ <type>Flag</type>
+ <CPUID>AVX</CPUID>
+ <category>Logical</category>
+ <return type="int" varname="dst" etype="UI32"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <parameter type="__m256d" varname="b" etype="FP64"/>
+ <description>Compute the bitwise AND of 256 bits (representing double-precision (64-bit) floating-point elements) in "a" and "b", producing an intermediate 256-bit value, and set "ZF" to 1 if the sign bit of each 64-bit element in the intermediate value is zero, otherwise set "ZF" to 0. Compute the bitwise NOT of "a" and then AND with "b", producing an intermediate value, and set "CF" to 1 if the sign bit of each 64-bit element in the intermediate value is zero, otherwise set "CF" to 0. Return the "ZF" value.</description>
+ <operation>
+tmp[255:0] := a[255:0] AND b[255:0]
+IF (tmp[63] == 0 &amp;&amp; tmp[127] == 0 &amp;&amp; tmp[191] == 0 &amp;&amp; tmp[255] == 0)
+ ZF := 1
+ELSE
+ ZF := 0
+FI
+tmp[255:0] := (NOT a[255:0]) AND b[255:0]
+IF (tmp[63] == 0 &amp;&amp; tmp[127] == 0 &amp;&amp; tmp[191] == 0 &amp;&amp; tmp[255] == 0)
+ CF := 1
+ELSE
+ CF := 0
+FI
+dst := ZF
+ </operation>
+ <instruction name="VTESTPD" form="ymm, ymm" xed="VTESTPD_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_testc_pd">
+ <type>Floating Point</type>
+ <type>Flag</type>
+ <CPUID>AVX</CPUID>
+ <category>Logical</category>
+ <return type="int" varname="dst" etype="UI32"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <parameter type="__m256d" varname="b" etype="FP64"/>
+ <description>Compute the bitwise AND of 256 bits (representing double-precision (64-bit) floating-point elements) in "a" and "b", producing an intermediate 256-bit value, and set "ZF" to 1 if the sign bit of each 64-bit element in the intermediate value is zero, otherwise set "ZF" to 0. Compute the bitwise NOT of "a" and then AND with "b", producing an intermediate value, and set "CF" to 1 if the sign bit of each 64-bit element in the intermediate value is zero, otherwise set "CF" to 0. Return the "CF" value.</description>
+ <operation>
+tmp[255:0] := a[255:0] AND b[255:0]
+IF (tmp[63] == 0 &amp;&amp; tmp[127] == 0 &amp;&amp; tmp[191] == 0 &amp;&amp; tmp[255] == 0)
+ ZF := 1
+ELSE
+ ZF := 0
+FI
+tmp[255:0] := (NOT a[255:0]) AND b[255:0]
+IF (tmp[63] == 0 &amp;&amp; tmp[127] == 0 &amp;&amp; tmp[191] == 0 &amp;&amp; tmp[255] == 0)
+ CF := 1
+ELSE
+ CF := 0
+FI
+dst := CF
+ </operation>
+ <instruction name="VTESTPD" form="ymm, ymm" xed="VTESTPD_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_testnzc_pd">
+ <type>Floating Point</type>
+ <type>Flag</type>
+ <CPUID>AVX</CPUID>
+ <category>Logical</category>
+ <return type="int" varname="dst" etype="UI32"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <parameter type="__m256d" varname="b" etype="FP64"/>
+ <description>Compute the bitwise AND of 256 bits (representing double-precision (64-bit) floating-point elements) in "a" and "b", producing an intermediate 256-bit value, and set "ZF" to 1 if the sign bit of each 64-bit element in the intermediate value is zero, otherwise set "ZF" to 0. Compute the bitwise NOT of "a" and then AND with "b", producing an intermediate value, and set "CF" to 1 if the sign bit of each 64-bit element in the intermediate value is zero, otherwise set "CF" to 0. Return 1 if both the "ZF" and "CF" values are zero, otherwise return 0.</description>
+ <operation>
+tmp[255:0] := a[255:0] AND b[255:0]
+IF (tmp[63] == 0 &amp;&amp; tmp[127] == 0 &amp;&amp; tmp[191] == 0 &amp;&amp; tmp[255] == 0)
+ ZF := 1
+ELSE
+ ZF := 0
+FI
+tmp[255:0] := (NOT a[255:0]) AND b[255:0]
+IF (tmp[63] == 0 &amp;&amp; tmp[127] == 0 &amp;&amp; tmp[191] == 0 &amp;&amp; tmp[255] == 0)
+ CF := 1
+ELSE
+ CF := 0
+FI
+IF (ZF == 0 &amp;&amp; CF == 0)
+ dst := 1
+ELSE
+ dst := 0
+FI
+ </operation>
+ <instruction name="VTESTPD" form="ymm, ymm" xed="VTESTPD_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm_testz_pd">
+ <type>Floating Point</type>
+ <type>Flag</type>
+ <CPUID>AVX</CPUID>
+ <category>Logical</category>
+ <return type="int" varname="dst" etype="UI32"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Compute the bitwise AND of 128 bits (representing double-precision (64-bit) floating-point elements) in "a" and "b", producing an intermediate 128-bit value, and set "ZF" to 1 if the sign bit of each 64-bit element in the intermediate value is zero, otherwise set "ZF" to 0. Compute the bitwise NOT of "a" and then AND with "b", producing an intermediate value, and set "CF" to 1 if the sign bit of each 64-bit element in the intermediate value is zero, otherwise set "CF" to 0. Return the "ZF" value.</description>
+ <operation>
+tmp[127:0] := a[127:0] AND b[127:0]
+IF (tmp[63] == 0 &amp;&amp; tmp[127] == 0)
+ ZF := 1
+ELSE
+ ZF := 0
+FI
+tmp[127:0] := (NOT a[127:0]) AND b[127:0]
+IF (tmp[63] == 0 &amp;&amp; tmp[127] == 0)
+ CF := 1
+ELSE
+ CF := 0
+FI
+dst := ZF
+ </operation>
+ <instruction name="VTESTPD" form="xmm, xmm" xed="VTESTPD_XMMdq_XMMdq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm_testc_pd">
+ <type>Floating Point</type>
+ <type>Flag</type>
+ <CPUID>AVX</CPUID>
+ <category>Logical</category>
+ <return type="int" varname="dst" etype="UI32"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Compute the bitwise AND of 128 bits (representing double-precision (64-bit) floating-point elements) in "a" and "b", producing an intermediate 128-bit value, and set "ZF" to 1 if the sign bit of each 64-bit element in the intermediate value is zero, otherwise set "ZF" to 0. Compute the bitwise NOT of "a" and then AND with "b", producing an intermediate value, and set "CF" to 1 if the sign bit of each 64-bit element in the intermediate value is zero, otherwise set "CF" to 0. Return the "CF" value.</description>
+ <operation>
+tmp[127:0] := a[127:0] AND b[127:0]
+IF (tmp[63] == 0 &amp;&amp; tmp[127] == 0)
+ ZF := 1
+ELSE
+ ZF := 0
+FI
+tmp[127:0] := (NOT a[127:0]) AND b[127:0]
+IF (tmp[63] == 0 &amp;&amp; tmp[127] == 0)
+ CF := 1
+ELSE
+ CF := 0
+FI
+dst := CF
+ </operation>
+ <instruction name="VTESTPD" form="xmm, xmm" xed="VTESTPD_XMMdq_XMMdq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm_testnzc_pd">
+ <type>Floating Point</type>
+ <type>Flag</type>
+ <CPUID>AVX</CPUID>
+ <category>Logical</category>
+ <return type="int" varname="dst" etype="UI32"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Compute the bitwise AND of 128 bits (representing double-precision (64-bit) floating-point elements) in "a" and "b", producing an intermediate 128-bit value, and set "ZF" to 1 if the sign bit of each 64-bit element in the intermediate value is zero, otherwise set "ZF" to 0. Compute the bitwise NOT of "a" and then AND with "b", producing an intermediate value, and set "CF" to 1 if the sign bit of each 64-bit element in the intermediate value is zero, otherwise set "CF" to 0. Return 1 if both the "ZF" and "CF" values are zero, otherwise return 0.</description>
+ <operation>
+tmp[127:0] := a[127:0] AND b[127:0]
+IF (tmp[63] == 0 &amp;&amp; tmp[127] == 0)
+ ZF := 1
+ELSE
+ ZF := 0
+FI
+tmp[127:0] := (NOT a[127:0]) AND b[127:0]
+IF (tmp[63] == 0 &amp;&amp; tmp[127] == 0)
+ CF := 1
+ELSE
+ CF := 0
+FI
+IF (ZF == 0 &amp;&amp; CF == 0)
+ dst := 1
+ELSE
+ dst := 0
+FI
+ </operation>
+ <instruction name="VTESTPD" form="xmm, xmm" xed="VTESTPD_XMMdq_XMMdq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_testz_ps">
+ <type>Floating Point</type>
+ <type>Flag</type>
+ <CPUID>AVX</CPUID>
+ <category>Logical</category>
+ <return type="int" varname="dst" etype="UI32"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="__m256" varname="b" etype="FP32"/>
+ <description>Compute the bitwise AND of 256 bits (representing single-precision (32-bit) floating-point elements) in "a" and "b", producing an intermediate 256-bit value, and set "ZF" to 1 if the sign bit of each 32-bit element in the intermediate value is zero, otherwise set "ZF" to 0. Compute the bitwise NOT of "a" and then AND with "b", producing an intermediate value, and set "CF" to 1 if the sign bit of each 32-bit element in the intermediate value is zero, otherwise set "CF" to 0. Return the "ZF" value.</description>
+ <operation>
+tmp[255:0] := a[255:0] AND b[255:0]
+IF (tmp[31] == 0 &amp;&amp; tmp[63] == 0 &amp;&amp; tmp[95] == 0 &amp;&amp; tmp[127] == 0 &amp;&amp; \
+ tmp[159] == 0 &amp;&amp; tmp[191] == 0 &amp;&amp; tmp[223] == 0 &amp;&amp; tmp[255] == 0)
+ ZF := 1
+ELSE
+ ZF := 0
+FI
+tmp[255:0] := (NOT a[255:0]) AND b[255:0]
+IF (tmp[31] == 0 &amp;&amp; tmp[63] == 0 &amp;&amp; tmp[95] == 0 &amp;&amp; tmp[127] == 0 &amp;&amp; \
+ tmp[159] == 0 &amp;&amp; tmp[191] == 0 &amp;&amp; tmp[223] == 0 &amp;&amp; tmp[255] == 0)
+ CF := 1
+ELSE
+ CF := 0
+FI
+dst := ZF
+ </operation>
+ <instruction name="VTESTPS" form="ymm, ymm" xed="VTESTPS_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_testc_ps">
+ <type>Floating Point</type>
+ <type>Flag</type>
+ <CPUID>AVX</CPUID>
+ <category>Logical</category>
+ <return type="int" varname="dst" etype="UI32"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="__m256" varname="b" etype="FP32"/>
+ <description>Compute the bitwise AND of 256 bits (representing single-precision (32-bit) floating-point elements) in "a" and "b", producing an intermediate 256-bit value, and set "ZF" to 1 if the sign bit of each 32-bit element in the intermediate value is zero, otherwise set "ZF" to 0. Compute the bitwise NOT of "a" and then AND with "b", producing an intermediate value, and set "CF" to 1 if the sign bit of each 32-bit element in the intermediate value is zero, otherwise set "CF" to 0. Return the "CF" value.</description>
+ <operation>
+tmp[255:0] := a[255:0] AND b[255:0]
+IF (tmp[31] == 0 &amp;&amp; tmp[63] == 0 &amp;&amp; tmp[95] == 0 &amp;&amp; tmp[127] == 0 &amp;&amp; \
+ tmp[159] == 0 &amp;&amp; tmp[191] == 0 &amp;&amp; tmp[223] == 0 &amp;&amp; tmp[255] == 0)
+ ZF := 1
+ELSE
+ ZF := 0
+FI
+tmp[255:0] := (NOT a[255:0]) AND b[255:0]
+IF (tmp[31] == 0 &amp;&amp; tmp[63] == 0 &amp;&amp; tmp[95] == 0 &amp;&amp; tmp[127] == 0 &amp;&amp; \
+ tmp[159] == 0 &amp;&amp; tmp[191] == 0 &amp;&amp; tmp[223] == 0 &amp;&amp; tmp[255] == 0)
+ CF := 1
+ELSE
+ CF := 0
+FI
+dst := CF
+ </operation>
+ <instruction name="VTESTPS" form="ymm, ymm" xed="VTESTPS_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_testnzc_ps">
+ <type>Floating Point</type>
+ <type>Flag</type>
+ <CPUID>AVX</CPUID>
+ <category>Logical</category>
+ <return type="int" varname="dst" etype="UI32"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="__m256" varname="b" etype="FP32"/>
+ <description>Compute the bitwise AND of 256 bits (representing single-precision (32-bit) floating-point elements) in "a" and "b", producing an intermediate 256-bit value, and set "ZF" to 1 if the sign bit of each 32-bit element in the intermediate value is zero, otherwise set "ZF" to 0. Compute the bitwise NOT of "a" and then AND with "b", producing an intermediate value, and set "CF" to 1 if the sign bit of each 32-bit element in the intermediate value is zero, otherwise set "CF" to 0. Return 1 if both the "ZF" and "CF" values are zero, otherwise return 0.</description>
+ <operation>
+tmp[255:0] := a[255:0] AND b[255:0]
+IF (tmp[31] == 0 &amp;&amp; tmp[63] == 0 &amp;&amp; tmp[95] == 0 &amp;&amp; tmp[127] == 0 &amp;&amp; \
+ tmp[159] == 0 &amp;&amp; tmp[191] == 0 &amp;&amp; tmp[223] == 0 &amp;&amp; tmp[255] == 0)
+ ZF := 1
+ELSE
+ ZF := 0
+FI
+tmp[255:0] := (NOT a[255:0]) AND b[255:0]
+IF (tmp[31] == 0 &amp;&amp; tmp[63] == 0 &amp;&amp; tmp[95] == 0 &amp;&amp; tmp[127] == 0 &amp;&amp; \
+ tmp[159] == 0 &amp;&amp; tmp[191] == 0 &amp;&amp; tmp[223] == 0 &amp;&amp; tmp[255] == 0)
+ CF := 1
+ELSE
+ CF := 0
+FI
+IF (ZF == 0 &amp;&amp; CF == 0)
+ dst := 1
+ELSE
+ dst := 0
+FI
+ </operation>
+ <instruction name="VTESTPS" form="ymm, ymm" xed="VTESTPS_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm_testz_ps">
+ <type>Floating Point</type>
+ <type>Flag</type>
+ <CPUID>AVX</CPUID>
+ <category>Logical</category>
+ <return type="int" varname="dst" etype="UI32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Compute the bitwise AND of 128 bits (representing single-precision (32-bit) floating-point elements) in "a" and "b", producing an intermediate 128-bit value, and set "ZF" to 1 if the sign bit of each 32-bit element in the intermediate value is zero, otherwise set "ZF" to 0. Compute the bitwise NOT of "a" and then AND with "b", producing an intermediate value, and set "CF" to 1 if the sign bit of each 32-bit element in the intermediate value is zero, otherwise set "CF" to 0. Return the "ZF" value.</description>
+ <operation>
+tmp[127:0] := a[127:0] AND b[127:0]
+IF (tmp[31] == 0 &amp;&amp; tmp[63] == 0 &amp;&amp; tmp[95] == 0 &amp;&amp; tmp[127] == 0)
+ ZF := 1
+ELSE
+ ZF := 0
+FI
+tmp[127:0] := (NOT a[127:0]) AND b[127:0]
+IF (tmp[31] == 0 &amp;&amp; tmp[63] == 0 &amp;&amp; tmp[95] == 0 &amp;&amp; tmp[127] == 0)
+ CF := 1
+ELSE
+ CF := 0
+FI
+dst := ZF
+ </operation>
+ <instruction name="VTESTPS" form="xmm, xmm" xed="VTESTPS_XMMdq_XMMdq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm_testc_ps">
+ <type>Floating Point</type>
+ <type>Flag</type>
+ <CPUID>AVX</CPUID>
+ <category>Logical</category>
+ <return type="int" varname="dst" etype="UI32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Compute the bitwise AND of 128 bits (representing single-precision (32-bit) floating-point elements) in "a" and "b", producing an intermediate 128-bit value, and set "ZF" to 1 if the sign bit of each 32-bit element in the intermediate value is zero, otherwise set "ZF" to 0. Compute the bitwise NOT of "a" and then AND with "b", producing an intermediate value, and set "CF" to 1 if the sign bit of each 32-bit element in the intermediate value is zero, otherwise set "CF" to 0. Return the "CF" value.</description>
+ <operation>
+tmp[127:0] := a[127:0] AND b[127:0]
+IF (tmp[31] == 0 &amp;&amp; tmp[63] == 0 &amp;&amp; tmp[95] == 0 &amp;&amp; tmp[127] == 0)
+ ZF := 1
+ELSE
+ ZF := 0
+FI
+tmp[127:0] := (NOT a[127:0]) AND b[127:0]
+IF (tmp[31] == 0 &amp;&amp; tmp[63] == 0 &amp;&amp; tmp[95] == 0 &amp;&amp; tmp[127] == 0)
+ CF := 1
+ELSE
+ CF := 0
+FI
+dst := CF
+ </operation>
+ <instruction name="VTESTPS" form="xmm, xmm" xed="VTESTPS_XMMdq_XMMdq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm_testnzc_ps">
+ <type>Floating Point</type>
+ <type>Flag</type>
+ <CPUID>AVX</CPUID>
+ <category>Logical</category>
+ <return type="int" varname="dst" etype="UI32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Compute the bitwise AND of 128 bits (representing single-precision (32-bit) floating-point elements) in "a" and "b", producing an intermediate 128-bit value, and set "ZF" to 1 if the sign bit of each 32-bit element in the intermediate value is zero, otherwise set "ZF" to 0. Compute the bitwise NOT of "a" and then AND with "b", producing an intermediate value, and set "CF" to 1 if the sign bit of each 32-bit element in the intermediate value is zero, otherwise set "CF" to 0. Return 1 if both the "ZF" and "CF" values are zero, otherwise return 0.</description>
+ <operation>
+tmp[127:0] := a[127:0] AND b[127:0]
+IF (tmp[31] == 0 &amp;&amp; tmp[63] == 0 &amp;&amp; tmp[95] == 0 &amp;&amp; tmp[127] == 0)
+ ZF := 1
+ELSE
+ ZF := 0
+FI
+tmp[127:0] := (NOT a[127:0]) AND b[127:0]
+IF (tmp[31] == 0 &amp;&amp; tmp[63] == 0 &amp;&amp; tmp[95] == 0 &amp;&amp; tmp[127] == 0)
+ CF := 1
+ELSE
+ CF := 0
+FI
+IF (ZF == 0 &amp;&amp; CF == 0)
+ dst := 1
+ELSE
+ dst := 0
+FI
+ </operation>
+ <instruction name="VTESTPS" form="xmm, xmm" xed="VTESTPS_XMMdq_XMMdq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_movemask_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Miscellaneous</category>
+ <return type="int" varname="dst" etype="UI32"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <description>Set each bit of mask "dst" based on the most significant bit of the corresponding packed double-precision (64-bit) floating-point element in "a".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF a[i+63]
+ dst[j] := 1
+ ELSE
+ dst[j] := 0
+ FI
+ENDFOR
+dst[MAX:4] := 0
+ </operation>
+ <instruction name="VMOVMSKPD" form="r32, ymm" xed="VMOVMSKPD_GPR32d_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_movemask_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Miscellaneous</category>
+ <return type="int" varname="dst" etype="UI32"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <description>Set each bit of mask "dst" based on the most significant bit of the corresponding packed single-precision (32-bit) floating-point element in "a".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF a[i+31]
+ dst[j] := 1
+ ELSE
+ dst[j] := 0
+ FI
+ENDFOR
+dst[MAX:8] := 0
+ </operation>
+ <instruction name="VMOVMSKPS" form="r32, ymm" xed="VMOVMSKPS_GPR32d_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_setzero_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Set</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="void"/>
+ <description>Return vector of type __m256d with all elements set to zero.</description>
+ <operation>
+dst[MAX:0] := 0
+ </operation>
+ <instruction name="VXORPD" form="ymm, ymm, ymm" xed="VXORPD_YMMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_setzero_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Set</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="void"/>
+ <description>Return vector of type __m256 with all elements set to zero.</description>
+ <operation>
+dst[MAX:0] := 0
+ </operation>
+ <instruction name="VXORPS" form="ymm, ymm, ymm" xed="VXORPS_YMMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_setzero_si256">
+ <type>Integer</type>
+ <CPUID>AVX</CPUID>
+ <category>Set</category>
+ <return type="__m256i" varname="dst" etype="M256"/>
+ <parameter type="void"/>
+ <description>Return vector of type __m256i with all elements set to zero.</description>
+ <operation>
+dst[MAX:0] := 0
+ </operation>
+ <instruction name="VPXOR" form="ymm, ymm, ymm" xed="VPXOR_YMMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" sequence="TRUE" name="_mm256_set_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Set</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="double" varname="e3" etype="FP64"/>
+ <parameter type="double" varname="e2" etype="FP64"/>
+ <parameter type="double" varname="e1" etype="FP64"/>
+ <parameter type="double" varname="e0" etype="FP64"/>
+ <description>Set packed double-precision (64-bit) floating-point elements in "dst" with the supplied values.</description>
+ <operation>
+dst[63:0] := e0
+dst[127:64] := e1
+dst[191:128] := e2
+dst[255:192] := e3
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" sequence="TRUE" name="_mm256_set_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Set</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="float" varname="e7" etype="FP32"/>
+ <parameter type="float" varname="e6" etype="FP32"/>
+ <parameter type="float" varname="e5" etype="FP32"/>
+ <parameter type="float" varname="e4" etype="FP32"/>
+ <parameter type="float" varname="e3" etype="FP32"/>
+ <parameter type="float" varname="e2" etype="FP32"/>
+ <parameter type="float" varname="e1" etype="FP32"/>
+ <parameter type="float" varname="e0" etype="FP32"/>
+ <description>Set packed single-precision (32-bit) floating-point elements in "dst" with the supplied values.</description>
+ <operation>
+dst[31:0] := e0
+dst[63:32] := e1
+dst[95:64] := e2
+dst[127:96] := e3
+dst[159:128] := e4
+dst[191:160] := e5
+dst[223:192] := e6
+dst[255:224] := e7
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" sequence="TRUE" name="_mm256_set_epi8">
+ <type>Integer</type>
+ <CPUID>AVX</CPUID>
+ <category>Set</category>
+ <return type="__m256i" varname="dst" etype="UI8"/>
+ <parameter type="char" varname="e31" etype="UI8"/>
+ <parameter type="char" varname="e30" etype="UI8"/>
+ <parameter type="char" varname="e29" etype="UI8"/>
+ <parameter type="char" varname="e28" etype="UI8"/>
+ <parameter type="char" varname="e27" etype="UI8"/>
+ <parameter type="char" varname="e26" etype="UI8"/>
+ <parameter type="char" varname="e25" etype="UI8"/>
+ <parameter type="char" varname="e24" etype="UI8"/>
+ <parameter type="char" varname="e23" etype="UI8"/>
+ <parameter type="char" varname="e22" etype="UI8"/>
+ <parameter type="char" varname="e21" etype="UI8"/>
+ <parameter type="char" varname="e20" etype="UI8"/>
+ <parameter type="char" varname="e19" etype="UI8"/>
+ <parameter type="char" varname="e18" etype="UI8"/>
+ <parameter type="char" varname="e17" etype="UI8"/>
+ <parameter type="char" varname="e16" etype="UI8"/>
+ <parameter type="char" varname="e15" etype="UI8"/>
+ <parameter type="char" varname="e14" etype="UI8"/>
+ <parameter type="char" varname="e13" etype="UI8"/>
+ <parameter type="char" varname="e12" etype="UI8"/>
+ <parameter type="char" varname="e11" etype="UI8"/>
+ <parameter type="char" varname="e10" etype="UI8"/>
+ <parameter type="char" varname="e9" etype="UI8"/>
+ <parameter type="char" varname="e8" etype="UI8"/>
+ <parameter type="char" varname="e7" etype="UI8"/>
+ <parameter type="char" varname="e6" etype="UI8"/>
+ <parameter type="char" varname="e5" etype="UI8"/>
+ <parameter type="char" varname="e4" etype="UI8"/>
+ <parameter type="char" varname="e3" etype="UI8"/>
+ <parameter type="char" varname="e2" etype="UI8"/>
+ <parameter type="char" varname="e1" etype="UI8"/>
+ <parameter type="char" varname="e0" etype="UI8"/>
+ <description>Set packed 8-bit integers in "dst" with the supplied values.</description>
+ <operation>
+dst[7:0] := e0
+dst[15:8] := e1
+dst[23:16] := e2
+dst[31:24] := e3
+dst[39:32] := e4
+dst[47:40] := e5
+dst[55:48] := e6
+dst[63:56] := e7
+dst[71:64] := e8
+dst[79:72] := e9
+dst[87:80] := e10
+dst[95:88] := e11
+dst[103:96] := e12
+dst[111:104] := e13
+dst[119:112] := e14
+dst[127:120] := e15
+dst[135:128] := e16
+dst[143:136] := e17
+dst[151:144] := e18
+dst[159:152] := e19
+dst[167:160] := e20
+dst[175:168] := e21
+dst[183:176] := e22
+dst[191:184] := e23
+dst[199:192] := e24
+dst[207:200] := e25
+dst[215:208] := e26
+dst[223:216] := e27
+dst[231:224] := e28
+dst[239:232] := e29
+dst[247:240] := e30
+dst[255:248] := e31
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" sequence="TRUE" name="_mm256_set_epi16">
+ <type>Integer</type>
+ <CPUID>AVX</CPUID>
+ <category>Set</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="short" varname="e15" etype="UI16"/>
+ <parameter type="short" varname="e14" etype="UI16"/>
+ <parameter type="short" varname="e13" etype="UI16"/>
+ <parameter type="short" varname="e12" etype="UI16"/>
+ <parameter type="short" varname="e11" etype="UI16"/>
+ <parameter type="short" varname="e10" etype="UI16"/>
+ <parameter type="short" varname="e9" etype="UI16"/>
+ <parameter type="short" varname="e8" etype="UI16"/>
+ <parameter type="short" varname="e7" etype="UI16"/>
+ <parameter type="short" varname="e6" etype="UI16"/>
+ <parameter type="short" varname="e5" etype="UI16"/>
+ <parameter type="short" varname="e4" etype="UI16"/>
+ <parameter type="short" varname="e3" etype="UI16"/>
+ <parameter type="short" varname="e2" etype="UI16"/>
+ <parameter type="short" varname="e1" etype="UI16"/>
+ <parameter type="short" varname="e0" etype="UI16"/>
+ <description>Set packed 16-bit integers in "dst" with the supplied values.</description>
+ <operation>
+dst[15:0] := e0
+dst[31:16] := e1
+dst[47:32] := e2
+dst[63:48] := e3
+dst[79:64] := e4
+dst[95:80] := e5
+dst[111:96] := e6
+dst[127:112] := e7
+dst[143:128] := e8
+dst[159:144] := e9
+dst[175:160] := e10
+dst[191:176] := e11
+dst[207:192] := e12
+dst[223:208] := e13
+dst[239:224] := e14
+dst[255:240] := e15
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" sequence="TRUE" name="_mm256_set_epi32">
+ <type>Integer</type>
+ <CPUID>AVX</CPUID>
+ <category>Set</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="int" varname="e7" etype="UI32"/>
+ <parameter type="int" varname="e6" etype="UI32"/>
+ <parameter type="int" varname="e5" etype="UI32"/>
+ <parameter type="int" varname="e4" etype="UI32"/>
+ <parameter type="int" varname="e3" etype="UI32"/>
+ <parameter type="int" varname="e2" etype="UI32"/>
+ <parameter type="int" varname="e1" etype="UI32"/>
+ <parameter type="int" varname="e0" etype="UI32"/>
+ <description>Set packed 32-bit integers in "dst" with the supplied values.</description>
+ <operation>
+dst[31:0] := e0
+dst[63:32] := e1
+dst[95:64] := e2
+dst[127:96] := e3
+dst[159:128] := e4
+dst[191:160] := e5
+dst[223:192] := e6
+dst[255:224] := e7
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" sequence="TRUE" name="_mm256_set_epi64x">
+ <type>Integer</type>
+ <CPUID>AVX</CPUID>
+ <category>Set</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__int64" varname="e3" etype="UI64"/>
+ <parameter type="__int64" varname="e2" etype="UI64"/>
+ <parameter type="__int64" varname="e1" etype="UI64"/>
+ <parameter type="__int64" varname="e0" etype="UI64"/>
+ <description>Set packed 64-bit integers in "dst" with the supplied values.</description>
+ <operation>
+dst[63:0] := e0
+dst[127:64] := e1
+dst[191:128] := e2
+dst[255:192] := e3
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" sequence="TRUE" name="_mm256_setr_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Set</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="double" varname="e3" etype="FP64"/>
+ <parameter type="double" varname="e2" etype="FP64"/>
+ <parameter type="double" varname="e1" etype="FP64"/>
+ <parameter type="double" varname="e0" etype="FP64"/>
+ <description>Set packed double-precision (64-bit) floating-point elements in "dst" with the supplied values in reverse order.</description>
+ <operation>
+dst[63:0] := e3
+dst[127:64] := e2
+dst[191:128] := e1
+dst[255:192] := e0
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" sequence="TRUE" name="_mm256_setr_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Set</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="float" varname="e7" etype="FP32"/>
+ <parameter type="float" varname="e6" etype="FP32"/>
+ <parameter type="float" varname="e5" etype="FP32"/>
+ <parameter type="float" varname="e4" etype="FP32"/>
+ <parameter type="float" varname="e3" etype="FP32"/>
+ <parameter type="float" varname="e2" etype="FP32"/>
+ <parameter type="float" varname="e1" etype="FP32"/>
+ <parameter type="float" varname="e0" etype="FP32"/>
+ <description>Set packed single-precision (32-bit) floating-point elements in "dst" with the supplied values in reverse order.</description>
+ <operation>
+dst[31:0] := e7
+dst[63:32] := e6
+dst[95:64] := e5
+dst[127:96] := e4
+dst[159:128] := e3
+dst[191:160] := e2
+dst[223:192] := e1
+dst[255:224] := e0
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" sequence="TRUE" name="_mm256_setr_epi8">
+ <type>Integer</type>
+ <CPUID>AVX</CPUID>
+ <category>Set</category>
+ <return type="__m256i" varname="dst" etype="UI8"/>
+ <parameter type="char" varname="e31" etype="UI8"/>
+ <parameter type="char" varname="e30" etype="UI8"/>
+ <parameter type="char" varname="e29" etype="UI8"/>
+ <parameter type="char" varname="e28" etype="UI8"/>
+ <parameter type="char" varname="e27" etype="UI8"/>
+ <parameter type="char" varname="e26" etype="UI8"/>
+ <parameter type="char" varname="e25" etype="UI8"/>
+ <parameter type="char" varname="e24" etype="UI8"/>
+ <parameter type="char" varname="e23" etype="UI8"/>
+ <parameter type="char" varname="e22" etype="UI8"/>
+ <parameter type="char" varname="e21" etype="UI8"/>
+ <parameter type="char" varname="e20" etype="UI8"/>
+ <parameter type="char" varname="e19" etype="UI8"/>
+ <parameter type="char" varname="e18" etype="UI8"/>
+ <parameter type="char" varname="e17" etype="UI8"/>
+ <parameter type="char" varname="e16" etype="UI8"/>
+ <parameter type="char" varname="e15" etype="UI8"/>
+ <parameter type="char" varname="e14" etype="UI8"/>
+ <parameter type="char" varname="e13" etype="UI8"/>
+ <parameter type="char" varname="e12" etype="UI8"/>
+ <parameter type="char" varname="e11" etype="UI8"/>
+ <parameter type="char" varname="e10" etype="UI8"/>
+ <parameter type="char" varname="e9" etype="UI8"/>
+ <parameter type="char" varname="e8" etype="UI8"/>
+ <parameter type="char" varname="e7" etype="UI8"/>
+ <parameter type="char" varname="e6" etype="UI8"/>
+ <parameter type="char" varname="e5" etype="UI8"/>
+ <parameter type="char" varname="e4" etype="UI8"/>
+ <parameter type="char" varname="e3" etype="UI8"/>
+ <parameter type="char" varname="e2" etype="UI8"/>
+ <parameter type="char" varname="e1" etype="UI8"/>
+ <parameter type="char" varname="e0" etype="UI8"/>
+ <description>Set packed 8-bit integers in "dst" with the supplied values in reverse order.</description>
+ <operation>
+dst[7:0] := e31
+dst[15:8] := e30
+dst[23:16] := e29
+dst[31:24] := e28
+dst[39:32] := e27
+dst[47:40] := e26
+dst[55:48] := e25
+dst[63:56] := e24
+dst[71:64] := e23
+dst[79:72] := e22
+dst[87:80] := e21
+dst[95:88] := e20
+dst[103:96] := e19
+dst[111:104] := e18
+dst[119:112] := e17
+dst[127:120] := e16
+dst[135:128] := e15
+dst[143:136] := e14
+dst[151:144] := e13
+dst[159:152] := e12
+dst[167:160] := e11
+dst[175:168] := e10
+dst[183:176] := e9
+dst[191:184] := e8
+dst[199:192] := e7
+dst[207:200] := e6
+dst[215:208] := e5
+dst[223:216] := e4
+dst[231:224] := e3
+dst[239:232] := e2
+dst[247:240] := e1
+dst[255:248] := e0
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" sequence="TRUE" name="_mm256_setr_epi16">
+ <type>Integer</type>
+ <CPUID>AVX</CPUID>
+ <category>Set</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="short" varname="e15" etype="UI16"/>
+ <parameter type="short" varname="e14" etype="UI16"/>
+ <parameter type="short" varname="e13" etype="UI16"/>
+ <parameter type="short" varname="e12" etype="UI16"/>
+ <parameter type="short" varname="e11" etype="UI16"/>
+ <parameter type="short" varname="e10" etype="UI16"/>
+ <parameter type="short" varname="e9" etype="UI16"/>
+ <parameter type="short" varname="e8" etype="UI16"/>
+ <parameter type="short" varname="e7" etype="UI16"/>
+ <parameter type="short" varname="e6" etype="UI16"/>
+ <parameter type="short" varname="e5" etype="UI16"/>
+ <parameter type="short" varname="e4" etype="UI16"/>
+ <parameter type="short" varname="e3" etype="UI16"/>
+ <parameter type="short" varname="e2" etype="UI16"/>
+ <parameter type="short" varname="e1" etype="UI16"/>
+ <parameter type="short" varname="e0" etype="UI16"/>
+ <description>Set packed 16-bit integers in "dst" with the supplied values in reverse order.</description>
+ <operation>
+dst[15:0] := e15
+dst[31:16] := e14
+dst[47:32] := e13
+dst[63:48] := e12
+dst[79:64] := e11
+dst[95:80] := e10
+dst[111:96] := e9
+dst[127:112] := e8
+dst[143:128] := e7
+dst[159:144] := e6
+dst[175:160] := e5
+dst[191:176] := e4
+dst[207:192] := e3
+dst[223:208] := e2
+dst[239:224] := e1
+dst[255:240] := e0
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" sequence="TRUE" name="_mm256_setr_epi32">
+ <type>Integer</type>
+ <CPUID>AVX</CPUID>
+ <category>Set</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="int" varname="e7" etype="UI32"/>
+ <parameter type="int" varname="e6" etype="UI32"/>
+ <parameter type="int" varname="e5" etype="UI32"/>
+ <parameter type="int" varname="e4" etype="UI32"/>
+ <parameter type="int" varname="e3" etype="UI32"/>
+ <parameter type="int" varname="e2" etype="UI32"/>
+ <parameter type="int" varname="e1" etype="UI32"/>
+ <parameter type="int" varname="e0" etype="UI32"/>
+ <description>Set packed 32-bit integers in "dst" with the supplied values in reverse order.</description>
+ <operation>
+dst[31:0] := e7
+dst[63:32] := e6
+dst[95:64] := e5
+dst[127:96] := e4
+dst[159:128] := e3
+dst[191:160] := e2
+dst[223:192] := e1
+dst[255:224] := e0
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" sequence="TRUE" name="_mm256_setr_epi64x">
+ <type>Integer</type>
+ <CPUID>AVX</CPUID>
+ <category>Set</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__int64" varname="e3" etype="UI64"/>
+ <parameter type="__int64" varname="e2" etype="UI64"/>
+ <parameter type="__int64" varname="e1" etype="UI64"/>
+ <parameter type="__int64" varname="e0" etype="UI64"/>
+ <description>Set packed 64-bit integers in "dst" with the supplied values in reverse order.</description>
+ <operation>
+dst[63:0] := e3
+dst[127:64] := e2
+dst[191:128] := e1
+dst[255:192] := e0
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" sequence="TRUE" name="_mm256_set1_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Set</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="double" varname="a" etype="FP64"/>
+ <description>Broadcast double-precision (64-bit) floating-point value "a" to all elements of "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ dst[i+63:i] := a[63:0]
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" sequence="TRUE" name="_mm256_set1_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Set</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="float" varname="a" etype="FP32"/>
+ <description>Broadcast single-precision (32-bit) floating-point value "a" to all elements of "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ dst[i+31:i] := a[31:0]
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" sequence="TRUE" name="_mm256_set1_epi8">
+ <type>Integer</type>
+ <CPUID>AVX</CPUID>
+ <category>Set</category>
+ <return type="__m256i" varname="dst" etype="UI8"/>
+ <parameter type="char" varname="a" etype="UI8"/>
+ <description>Broadcast 8-bit integer "a" to all elements of "dst". This intrinsic may generate the "vpbroadcastb".</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*8
+ dst[i+7:i] := a[7:0]
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" sequence="TRUE" name="_mm256_set1_epi16">
+ <type>Integer</type>
+ <CPUID>AVX</CPUID>
+ <category>Set</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="short" varname="a" etype="UI16"/>
+  <description>Broadcast 16-bit integer "a" to all elements of "dst". This intrinsic may generate the "vpbroadcastw".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ dst[i+15:i] := a[15:0]
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" sequence="TRUE" name="_mm256_set1_epi32">
+ <type>Integer</type>
+ <CPUID>AVX</CPUID>
+ <category>Set</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="int" varname="a" etype="UI32"/>
+ <description>Broadcast 32-bit integer "a" to all elements of "dst". This intrinsic may generate the "vpbroadcastd".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ dst[i+31:i] := a[31:0]
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" sequence="TRUE" name="_mm256_set1_epi64x">
+ <type>Integer</type>
+ <CPUID>AVX</CPUID>
+ <category>Set</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="long long" varname="a" etype="UI64"/>
+ <description>Broadcast 64-bit integer "a" to all elements of "dst". This intrinsic may generate the "vpbroadcastq".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ dst[i+63:i] := a[63:0]
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_castpd_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Cast</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <description>Cast vector of type __m256d to type __m256.
+ This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency.</description>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_castps_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Cast</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <description>Cast vector of type __m256 to type __m256d.
+ This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency.</description>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_castps_si256">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX</CPUID>
+ <category>Cast</category>
+ <return type="__m256i" varname="dst" etype="M256"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <description>Cast vector of type __m256 to type __m256i. This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency.</description>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_castpd_si256">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX</CPUID>
+ <category>Cast</category>
+ <return type="__m256i" varname="dst" etype="M256"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <description>Cast vector of type __m256d to type __m256i. This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency.</description>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_castsi256_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Cast</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <description>Cast vector of type __m256i to type __m256. This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency.</description>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_castsi256_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Cast</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <description>Cast vector of type __m256i to type __m256d. This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency.</description>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_castps256_ps128">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Cast</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <description>Cast vector of type __m256 to type __m128. This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency.</description>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_castpd256_pd128">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Cast</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <description>Cast vector of type __m256d to type __m128d. This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency.</description>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_castsi256_si128">
+ <type>Integer</type>
+ <CPUID>AVX</CPUID>
+ <category>Cast</category>
+ <return type="__m128i" varname="dst" etype="M128"/>
+ <parameter type="__m256i" varname="a" etype="M128"/>
+ <description>Cast vector of type __m256i to type __m128i. This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency.</description>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_castps128_ps256">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Cast</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Cast vector of type __m128 to type __m256; the upper 128 bits of the result are undefined. This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency.</description>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_castpd128_pd256">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Cast</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Cast vector of type __m128d to type __m256d; the upper 128 bits of the result are undefined. This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency.</description>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_castsi128_si256">
+ <type>Integer</type>
+ <CPUID>AVX</CPUID>
+ <category>Cast</category>
+ <return type="__m256i" varname="dst" etype="M256"/>
+ <parameter type="__m128i" varname="a" etype="M256"/>
+ <description>Cast vector of type __m128i to type __m256i; the upper 128 bits of the result are undefined. This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency.</description>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_zextps128_ps256">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Cast</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Cast vector of type __m128 to type __m256; the upper 128 bits of the result are zeroed. This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency.</description>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_zextpd128_pd256">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Cast</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Cast vector of type __m128d to type __m256d; the upper 128 bits of the result are zeroed. This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency.</description>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_zextsi128_si256">
+ <type>Integer</type>
+ <CPUID>AVX</CPUID>
+ <category>Cast</category>
+ <return type="__m256i" varname="dst" etype="M256"/>
+ <parameter type="__m128i" varname="a" etype="M256"/>
+ <description>Cast vector of type __m128i to type __m256i; the upper 128 bits of the result are zeroed. This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency.</description>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_floor_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <description>Round the packed single-precision (32-bit) floating-point elements in "a" down to an integer value, and store the results as packed single-precision floating-point elements in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ dst[i+31:i] := FLOOR(a[i+31:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VROUNDPS" form="ymm, ymm, imm8" xed="VROUNDPS_YMMqq_YMMqq_IMMb"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_ceil_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <description>Round the packed single-precision (32-bit) floating-point elements in "a" up to an integer value, and store the results as packed single-precision floating-point elements in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ dst[i+31:i] := CEIL(a[i+31:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VROUNDPS" form="ymm, ymm, imm8" xed="VROUNDPS_YMMqq_YMMqq_IMMb"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_floor_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <description>Round the packed double-precision (64-bit) floating-point elements in "a" down to an integer value, and store the results as packed double-precision floating-point elements in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ dst[i+63:i] := FLOOR(a[i+63:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VROUNDPD" form="ymm, ymm, imm8" xed="VROUNDPD_YMMqq_YMMqq_IMMb"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_ceil_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <description>Round the packed double-precision (64-bit) floating-point elements in "a" up to an integer value, and store the results as packed double-precision floating-point elements in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ dst[i+63:i] := CEIL(a[i+63:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VROUNDPD" form="ymm, ymm, imm8" xed="VROUNDPD_YMMqq_YMMqq_IMMb"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_undefined_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>General Support</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="void"/>
+ <description>Return vector of type __m256 with undefined elements.</description>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_undefined_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>General Support</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="void"/>
+ <description>Return vector of type __m256d with undefined elements.</description>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_undefined_si256">
+ <type>Integer</type>
+ <CPUID>AVX</CPUID>
+ <category>General Support</category>
+ <return type="__m256i" varname="dst" etype="M256"/>
+ <parameter type="void"/>
+ <description>Return vector of type __m256i with undefined elements.</description>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_set_m128">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Set</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="hi" etype="FP32"/>
+ <parameter type="__m128" varname="lo" etype="FP32"/>
+ <description>Set packed __m256 vector "dst" with the supplied values.</description>
+ <operation>
+dst[127:0] := lo[127:0]
+dst[255:128] := hi[127:0]
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VINSERTF128" form="ymm, ymm, xmm, imm8" xed="VINSERTF128_YMMqq_YMMqq_XMMdq_IMMb"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_set_m128d">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Set</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="hi" etype="FP64"/>
+ <parameter type="__m128d" varname="lo" etype="FP64"/>
+ <description>Set packed __m256d vector "dst" with the supplied values.</description>
+ <operation>
+dst[127:0] := lo[127:0]
+dst[255:128] := hi[127:0]
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VINSERTF128" form="ymm, ymm, xmm, imm8" xed="VINSERTF128_YMMqq_YMMqq_XMMdq_IMMb"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_set_m128i">
+ <type>Integer</type>
+ <CPUID>AVX</CPUID>
+ <category>Set</category>
+ <return type="__m256i" varname="dst" etype="M128"/>
+ <parameter type="__m128i" varname="hi" etype="M128"/>
+ <parameter type="__m128i" varname="lo" etype="M128"/>
+ <description>Set packed __m256i vector "dst" with the supplied values.</description>
+ <operation>
+dst[127:0] := lo[127:0]
+dst[255:128] := hi[127:0]
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VINSERTF128" form="ymm, ymm, xmm, imm8" xed="VINSERTF128_YMMqq_YMMqq_XMMdq_IMMb"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_setr_m128">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Set</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="lo" etype="FP32"/>
+ <parameter type="__m128" varname="hi" etype="FP32"/>
+ <description>Set packed __m256 vector "dst" with the supplied values.</description>
+ <operation>
+dst[127:0] := lo[127:0]
+dst[255:128] := hi[127:0]
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VINSERTF128" form="ymm, ymm, xmm, imm8" xed="VINSERTF128_YMMqq_YMMqq_XMMdq_IMMb"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_setr_m128d">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Set</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="lo" etype="FP64"/>
+ <parameter type="__m128d" varname="hi" etype="FP64"/>
+ <description>Set packed __m256d vector "dst" with the supplied values.</description>
+ <operation>
+dst[127:0] := lo[127:0]
+dst[255:128] := hi[127:0]
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VINSERTF128" form="ymm, ymm, xmm, imm8" xed="VINSERTF128_YMMqq_YMMqq_XMMdq_IMMb"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" name="_mm256_setr_m128i">
+ <type>Integer</type>
+ <CPUID>AVX</CPUID>
+ <category>Set</category>
+ <return type="__m256i" varname="dst" etype="M128"/>
+ <parameter type="__m128i" varname="lo" etype="M128"/>
+ <parameter type="__m128i" varname="hi" etype="M128"/>
+ <description>Set packed __m256i vector "dst" with the supplied values.</description>
+ <operation>
+dst[127:0] := lo[127:0]
+dst[255:128] := hi[127:0]
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VINSERTF128" form="ymm, ymm, xmm, imm8" xed="VINSERTF128_YMMqq_YMMqq_XMMdq_IMMb"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" sequence="TRUE" name="_mm256_loadu2_m128">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Load</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="float const*" varname="hiaddr" etype="FP32" memwidth="128"/>
+ <parameter type="float const*" varname="loaddr" etype="FP32" memwidth="128"/>
+ <description>Load two 128-bit values (composed of 4 packed single-precision (32-bit) floating-point elements) from memory, and combine them into a 256-bit value in "dst".
+ "hiaddr" and "loaddr" do not need to be aligned on any particular boundary.</description>
+ <operation>
+dst[127:0] := MEM[loaddr+127:loaddr]
+dst[255:128] := MEM[hiaddr+127:hiaddr]
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" sequence="TRUE" name="_mm256_loadu2_m128d">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Load</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="double const*" varname="hiaddr" etype="FP64" memwidth="128"/>
+ <parameter type="double const*" varname="loaddr" etype="FP64" memwidth="128"/>
+ <description>Load two 128-bit values (composed of 2 packed double-precision (64-bit) floating-point elements) from memory, and combine them into a 256-bit value in "dst".
+ "hiaddr" and "loaddr" do not need to be aligned on any particular boundary.</description>
+ <operation>
+dst[127:0] := MEM[loaddr+127:loaddr]
+dst[255:128] := MEM[hiaddr+127:hiaddr]
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" sequence="TRUE" name="_mm256_loadu2_m128i">
+ <type>Integer</type>
+ <CPUID>AVX</CPUID>
+ <category>Load</category>
+ <return type="__m256i" varname="dst" etype="M128"/>
+ <parameter type="__m128i const*" varname="hiaddr" etype="M128" memwidth="128"/>
+ <parameter type="__m128i const*" varname="loaddr" etype="M128" memwidth="128"/>
+ <description>Load two 128-bit values (composed of integer data) from memory, and combine them into a 256-bit value in "dst".
+ "hiaddr" and "loaddr" do not need to be aligned on any particular boundary.</description>
+ <operation>
+dst[127:0] := MEM[loaddr+127:loaddr]
+dst[255:128] := MEM[hiaddr+127:hiaddr]
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" sequence="TRUE" name="_mm256_storeu2_m128">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="float*" varname="hiaddr" etype="FP32" memwidth="128"/>
+ <parameter type="float*" varname="loaddr" etype="FP32" memwidth="128"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <description>Store the high and low 128-bit halves (each composed of 4 packed single-precision (32-bit) floating-point elements) from "a" into two different 128-bit memory locations.
+ "hiaddr" and "loaddr" do not need to be aligned on any particular boundary.</description>
+ <operation>
+MEM[loaddr+127:loaddr] := a[127:0]
+MEM[hiaddr+127:hiaddr] := a[255:128]
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" sequence="TRUE" name="_mm256_storeu2_m128d">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="double*" varname="hiaddr" etype="FP64" memwidth="128"/>
+ <parameter type="double*" varname="loaddr" etype="FP64" memwidth="128"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <description>Store the high and low 128-bit halves (each composed of 2 packed double-precision (64-bit) floating-point elements) from "a" into two different 128-bit memory locations.
+ "hiaddr" and "loaddr" do not need to be aligned on any particular boundary.</description>
+ <operation>
+MEM[loaddr+127:loaddr] := a[127:0]
+MEM[hiaddr+127:hiaddr] := a[255:128]
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" sequence="TRUE" name="_mm256_storeu2_m128i">
+ <type>Integer</type>
+ <CPUID>AVX</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="__m128i*" varname="hiaddr" etype="M128" memwidth="128"/>
+ <parameter type="__m128i*" varname="loaddr" etype="M128" memwidth="128"/>
+ <parameter type="__m256i" varname="a" etype="M128"/>
+ <description>Store the high and low 128-bit halves (each composed of integer data) from "a" into two different 128-bit memory locations.
+ "hiaddr" and "loaddr" do not need to be aligned on any particular boundary.</description>
+ <operation>
+MEM[loaddr+127:loaddr] := a[127:0]
+MEM[hiaddr+127:hiaddr] := a[255:128]
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm256_acos_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <description>Compute the inverse cosine of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ dst[i+63:i] := ACOS(a[i+63:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm256_acos_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <description>Compute the inverse cosine of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ dst[i+31:i] := ACOS(a[i+31:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm256_acosh_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <description>Compute the inverse hyperbolic cosine of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ dst[i+63:i] := ACOSH(a[i+63:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm256_acosh_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <description>Compute the inverse hyperbolic cosine of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ dst[i+31:i] := ACOSH(a[i+31:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm256_asin_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <description>Compute the inverse sine of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ dst[i+63:i] := ASIN(a[i+63:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm256_asin_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <description>Compute the inverse sine of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ dst[i+31:i] := ASIN(a[i+31:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm256_asinh_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <description>Compute the inverse hyperbolic sine of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ dst[i+63:i] := ASINH(a[i+63:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm256_asinh_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <description>Compute the inverse hyperbolic sine of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ dst[i+31:i] := ASINH(a[i+31:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm256_atan_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <description>Compute the inverse tangent of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ dst[i+63:i] := ATAN(a[i+63:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm256_atan_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <description>Compute the inverse tangent of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ dst[i+31:i] := ATAN(a[i+31:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm256_atan2_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <parameter type="__m256d" varname="b" etype="FP64"/>
+ <description>Compute the inverse tangent of packed double-precision (64-bit) floating-point elements in "a" divided by packed elements in "b", and store the results in "dst" expressed in radians.</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ dst[i+63:i] := ATAN2(a[i+63:i], b[i+63:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm256_atan2_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="__m256" varname="b" etype="FP32"/>
+ <description>Compute the inverse tangent of packed single-precision (32-bit) floating-point elements in "a" divided by packed elements in "b", and store the results in "dst" expressed in radians.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ dst[i+31:i] := ATAN2(a[i+31:i], b[i+31:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm256_atanh_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <description>Compute the inverse hyperbolic tangent of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ dst[i+63:i] := ATANH(a[i+63:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm256_atanh_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <description>Compute the inverse hyperbolic tangent of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ dst[i+31:i] := ATANH(a[i+31:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm256_cbrt_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <description>Compute the cube root of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst".</description>
+ <operation>FOR j := 0 to 3
+ i := j*64
+ dst[i+63:i] := CubeRoot(a[i+63:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm256_cbrt_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <description>Compute the cube root of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst".</description>
+ <operation>FOR j := 0 to 7
+ i := j*32
+ dst[i+31:i] := CubeRoot(a[i+31:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm256_cdfnorm_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Probability/Statistics</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <description>Compute the cumulative distribution function of packed double-precision (64-bit) floating-point elements in "a" using the normal distribution, and store the results in "dst".</description>
+ <operation>FOR j := 0 to 3
+ i := j*64
+ dst[i+63:i] := CDFNormal(a[i+63:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm256_cdfnorm_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Probability/Statistics</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <description>Compute the cumulative distribution function of packed single-precision (32-bit) floating-point elements in "a" using the normal distribution, and store the results in "dst".</description>
+ <operation>FOR j := 0 to 7
+ i := j*32
+ dst[i+31:i] := CDFNormal(a[i+31:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm256_cdfnorminv_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Probability/Statistics</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <description>Compute the inverse cumulative distribution function of packed double-precision (64-bit) floating-point elements in "a" using the normal distribution, and store the results in "dst".</description>
+ <operation>FOR j := 0 to 3
+ i := j*64
+ dst[i+63:i] := InverseCDFNormal(a[i+63:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm256_cdfnorminv_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Probability/Statistics</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <description>Compute the inverse cumulative distribution function of packed single-precision (32-bit) floating-point elements in "a" using the normal distribution, and store the results in "dst".</description>
+ <operation>FOR j := 0 to 7
+ i := j*32
+ dst[i+31:i] := InverseCDFNormal(a[i+31:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm256_cexp_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <description>Compute the exponential value of "e" raised to the power of packed complex numbers in "a", and store the complex results in "dst". Each complex number is composed of two adjacent single-precision (32-bit) floating-point elements, which defines the complex number "complex = vec.fp32[0] + i * vec.fp32[1]".</description>
+ <operation>
+DEFINE CEXP(a[31:0], b[31:0]) {
+ result[31:0] := POW(FP32(e), a[31:0]) * COS(b[31:0])
+ result[63:32] := POW(FP32(e), a[31:0]) * SIN(b[31:0])
+ RETURN result
+}
+FOR j := 0 to 3
+ i := j*64
+ dst[i+63:i] := CEXP(a[i+31:i], a[i+63:i+32])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm256_clog_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <description>Compute the natural logarithm of packed complex numbers in "a", and store the complex results in "dst". Each complex number is composed of two adjacent single-precision (32-bit) floating-point elements, which defines the complex number "complex = vec.fp32[0] + i * vec.fp32[1]".</description>
+ <operation>
+DEFINE CLOG(a[31:0], b[31:0]) {
+ result[31:0] := LOG(SQRT(POW(a, 2.0) + POW(b, 2.0)))
+ result[63:32] := ATAN2(b, a)
+ RETURN result
+}
+FOR j := 0 to 3
+ i := j*64
+ dst[i+63:i] := CLOG(a[i+31:i], a[i+63:i+32])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm256_cos_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <description>Compute the cosine of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ dst[i+63:i] := COS(a[i+63:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm256_cos_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <description>Compute the cosine of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ dst[i+31:i] := COS(a[i+31:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm256_cosd_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <description>Compute the cosine of packed double-precision (64-bit) floating-point elements in "a" expressed in degrees, and store the results in "dst".</description>
+ <operation>FOR j := 0 to 3
+ i := j*64
+ dst[i+63:i] := COSD(a[i+63:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm256_cosd_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <description>Compute the cosine of packed single-precision (32-bit) floating-point elements in "a" expressed in degrees, and store the results in "dst".</description>
+ <operation>FOR j := 0 to 7
+ i := j*32
+ dst[i+31:i] := COSD(a[i+31:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm256_cosh_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <description>Compute the hyperbolic cosine of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ dst[i+63:i] := COSH(a[i+63:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm256_cosh_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <description>Compute the hyperbolic cosine of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ dst[i+31:i] := COSH(a[i+31:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm256_csqrt_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+	<description>Compute the square root of packed complex numbers in "a", and store the complex results in "dst". Each complex number is composed of two adjacent single-precision (32-bit) floating-point elements, which defines the complex number "complex = vec.fp32[0] + i * vec.fp32[1]".</description>
+ <operation>
+DEFINE CSQRT(a[31:0], b[31:0]) {
+ sign[31:0] := (b &lt; 0.0) ? -FP32(1.0) : FP32(1.0)
+ result[31:0] := SQRT((a + SQRT(POW(a, 2.0) + POW(b, 2.0))) / 2.0)
+ result[63:32] := sign * SQRT((-a + SQRT(POW(a, 2.0) + POW(b, 2.0))) / 2.0)
+ RETURN result
+}
+FOR j := 0 to 3
+ i := j*64
+ dst[i+63:i] := CSQRT(a[i+31:i], a[i+63:i+32])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm256_div_epi8">
+ <type>Integer</type>
+ <CPUID>AVX</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI8"/>
+ <parameter type="__m256i" varname="a" etype="SI8"/>
+ <parameter type="__m256i" varname="b" etype="SI8"/>
+ <description>Divide packed signed 8-bit integers in "a" by packed elements in "b", and store the truncated results in "dst".</description>
+ <operation>
+FOR j := 0 to 31
+ i := 8*j
+ IF b[i+7:i] == 0
+ #DE
+ FI
+ dst[i+7:i] := Truncate8(a[i+7:i] / b[i+7:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm256_div_epi16">
+ <type>Integer</type>
+ <CPUID>AVX</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__m256i" varname="a" etype="SI16"/>
+ <parameter type="__m256i" varname="b" etype="SI16"/>
+ <description>Divide packed signed 16-bit integers in "a" by packed elements in "b", and store the truncated results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := 16*j
+ IF b[i+15:i] == 0
+ #DE
+ FI
+ dst[i+15:i] := Truncate16(a[i+15:i] / b[i+15:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm256_div_epi32">
+ <type>Integer</type>
+ <CPUID>AVX</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="a" etype="SI32"/>
+ <parameter type="__m256i" varname="b" etype="SI32"/>
+ <description>Divide packed signed 32-bit integers in "a" by packed elements in "b", and store the truncated results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := 32*j
+ IF b[i+31:i] == 0
+ #DE
+ FI
+ dst[i+31:i] := Truncate32(a[i+31:i] / b[i+31:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm256_div_epi64">
+ <type>Integer</type>
+ <CPUID>AVX</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m256i" varname="a" etype="SI64"/>
+ <parameter type="__m256i" varname="b" etype="SI64"/>
+ <description>Divide packed signed 64-bit integers in "a" by packed elements in "b", and store the truncated results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := 64*j
+ IF b[i+63:i] == 0
+ #DE
+ FI
+ dst[i+63:i] := Truncate64(a[i+63:i] / b[i+63:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm256_div_epu8">
+ <type>Integer</type>
+ <CPUID>AVX</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI8"/>
+ <parameter type="__m256i" varname="a" etype="UI8"/>
+ <parameter type="__m256i" varname="b" etype="UI8"/>
+ <description>Divide packed unsigned 8-bit integers in "a" by packed elements in "b", and store the truncated results in "dst".</description>
+ <operation>
+FOR j := 0 to 31
+ i := 8*j
+ IF b[i+7:i] == 0
+ #DE
+ FI
+ dst[i+7:i] := Truncate8(a[i+7:i] / b[i+7:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm256_div_epu16">
+ <type>Integer</type>
+ <CPUID>AVX</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__m256i" varname="a" etype="UI16"/>
+ <parameter type="__m256i" varname="b" etype="UI16"/>
+ <description>Divide packed unsigned 16-bit integers in "a" by packed elements in "b", and store the truncated results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := 16*j
+ IF b[i+15:i] == 0
+ #DE
+ FI
+ dst[i+15:i] := Truncate16(a[i+15:i] / b[i+15:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm256_div_epu32">
+ <type>Integer</type>
+ <CPUID>AVX</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="__m256i" varname="b" etype="UI32"/>
+ <description>Divide packed unsigned 32-bit integers in "a" by packed elements in "b", and store the truncated results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := 32*j
+ IF b[i+31:i] == 0
+ #DE
+ FI
+ dst[i+31:i] := Truncate32(a[i+31:i] / b[i+31:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm256_div_epu64">
+ <type>Integer</type>
+ <CPUID>AVX</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="__m256i" varname="b" etype="UI64"/>
+ <description>Divide packed unsigned 64-bit integers in "a" by packed elements in "b", and store the truncated results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := 64*j
+ IF b[i+63:i] == 0
+ #DE
+ FI
+ dst[i+63:i] := Truncate64(a[i+63:i] / b[i+63:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm256_erf_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Probability/Statistics</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <description>Compute the error function of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst".</description>
+ <operation>FOR j := 0 to 3
+ i := j*64
+ dst[i+63:i] := ERF(a[i+63:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm256_erf_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Probability/Statistics</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <description>Compute the error function of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst".</description>
+ <operation>FOR j := 0 to 7
+ i := j*32
+ dst[i+31:i] := ERF(a[i+31:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm256_erfc_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Probability/Statistics</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <description>Compute the complementary error function of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst".</description>
+ <operation>FOR j := 0 to 3
+ i := j*64
+ dst[i+63:i] := 1.0 - ERF(a[i+63:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm256_erfc_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Probability/Statistics</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <description>Compute the complementary error function of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst".</description>
+ <operation>FOR j := 0 to 7
+ i := j*32
+	dst[i+31:i] := 1.0 - ERF(a[i+31:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm256_erfcinv_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Probability/Statistics</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <description>Compute the inverse complementary error function of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst".</description>
+ <operation>FOR j := 0 to 3
+ i := j*64
+ dst[i+63:i] := 1.0 / (1.0 - ERF(a[i+63:i]))
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm256_erfcinv_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Probability/Statistics</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <description>Compute the inverse complementary error function of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst".</description>
+ <operation>FOR j := 0 to 7
+ i := j*32
+	dst[i+31:i] := 1.0 / (1.0 - ERF(a[i+31:i]))
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm256_erfinv_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Probability/Statistics</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <description>Compute the inverse error function of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst".</description>
+ <operation>FOR j := 0 to 3
+ i := j*64
+ dst[i+63:i] := 1.0 / ERF(a[i+63:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm256_erfinv_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Probability/Statistics</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <description>Compute the inverse error function of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst".</description>
+ <operation>FOR j := 0 to 7
+ i := j*32
+	dst[i+31:i] := 1.0 / ERF(a[i+31:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm256_exp_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <description>Compute the exponential value of "e" raised to the power of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ dst[i+63:i] := POW(e, a[i+63:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm256_exp_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <description>Compute the exponential value of "e" raised to the power of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ dst[i+31:i] := POW(FP32(e), a[i+31:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm256_exp10_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <description>Compute the exponential value of 10 raised to the power of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ dst[i+63:i] := POW(10.0, a[i+63:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm256_exp10_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <description>Compute the exponential value of 10 raised to the power of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ dst[i+31:i] := POW(FP32(10.0), a[i+31:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm256_exp2_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <description>Compute the exponential value of 2 raised to the power of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ dst[i+63:i] := POW(2.0, a[i+63:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm256_exp2_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <description>Compute the exponential value of 2 raised to the power of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ dst[i+31:i] := POW(FP32(2.0), a[i+31:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm256_expm1_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <description>Compute the exponential value of "e" raised to the power of packed double-precision (64-bit) floating-point elements in "a", subtract one from each element, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ dst[i+63:i] := POW(e, a[i+63:i]) - 1.0
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm256_expm1_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <description>Compute the exponential value of "e" raised to the power of packed single-precision (32-bit) floating-point elements in "a", subtract one from each element, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ dst[i+31:i] := POW(FP32(e), a[i+31:i]) - 1.0
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm256_hypot_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <parameter type="__m256d" varname="b" etype="FP64"/>
+	<description>Compute the length of the hypotenuse of a right triangle, with the lengths of the other two sides of the triangle stored as packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ dst[i+63:i] := SQRT(POW(a[i+63:i], 2.0) + POW(b[i+63:i], 2.0))
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm256_hypot_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="__m256" varname="b" etype="FP32"/>
+	<description>Compute the length of the hypotenuse of a right triangle, with the lengths of the other two sides of the triangle stored as packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ dst[i+31:i] := SQRT(POW(a[i+31:i], 2.0) + POW(b[i+31:i], 2.0))
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm256_idiv_epi32">
+ <type>Integer</type>
+ <CPUID>AVX</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="__m256i" varname="b" etype="UI32"/>
+ <description>Divide packed 32-bit integers in "a" by packed elements in "b", and store the truncated results in "dst".</description>
+ <operation>FOR j := 0 to 7
+ i := 32*j
+ dst[i+31:i] := TRUNCATE(a[i+31:i] / b[i+31:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm256_idivrem_epi32">
+ <type>Integer</type>
+ <CPUID>AVX</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i *" varname="mem_addr" etype="UI32" memwidth="256"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="__m256i" varname="b" etype="UI32"/>
+ <description>Divide packed 32-bit integers in "a" by packed elements in "b", store the truncated results in "dst", and store the remainders as packed 32-bit integers into memory at "mem_addr".</description>
+ <operation>FOR j := 0 to 7
+ i := 32*j
+ dst[i+31:i] := TRUNCATE(a[i+31:i] / b[i+31:i])
+ MEM[mem_addr+i+31:mem_addr+i] := REMAINDER(a[i+31:i] / b[i+31:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm256_invcbrt_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <description>Compute the inverse cube root of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst".</description>
+ <operation>FOR j := 0 to 3
+ i := j*64
+ dst[i+63:i] := InvCubeRoot(a[i+63:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm256_invcbrt_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <description>Compute the inverse cube root of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst".</description>
+ <operation>FOR j := 0 to 7
+ i := j*32
+ dst[i+31:i] := InvCubeRoot(a[i+31:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm256_invsqrt_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <description>Compute the inverse square root of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst".</description>
+ <operation>FOR j := 0 to 3
+ i := j*64
+ dst[i+63:i] := InvSQRT(a[i+63:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm256_invsqrt_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <description>Compute the inverse square root of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst".</description>
+ <operation>FOR j := 0 to 7
+ i := j*32
+ dst[i+31:i] := InvSQRT(a[i+31:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm256_irem_epi32">
+ <type>Integer</type>
+ <CPUID>AVX</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="__m256i" varname="b" etype="UI32"/>
+ <description>Divide packed 32-bit integers in "a" by packed elements in "b", and store the remainders as packed 32-bit integers in "dst".</description>
+ <operation>FOR j := 0 to 7
+ i := 32*j
+ dst[i+31:i] := REMAINDER(a[i+31:i] / b[i+31:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm256_log_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <description>Compute the natural logarithm of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ dst[i+63:i] := LOG(a[i+63:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm256_log_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <description>Compute the natural logarithm of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ dst[i+31:i] := LOG(a[i+31:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm256_log10_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <description>Compute the base-10 logarithm of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ dst[i+63:i] := LOG(a[i+63:i]) / LOG(10.0)
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm256_log10_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <description>Compute the base-10 logarithm of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ dst[i+31:i] := LOG(a[i+31:i]) / LOG(10.0)
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm256_log1p_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <description>Compute the natural logarithm of one plus packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ dst[i+63:i] := LOG(1.0 + a[i+63:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm256_log1p_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <description>Compute the natural logarithm of one plus packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ dst[i+31:i] := LOG(1.0 + a[i+31:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm256_log2_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <description>Compute the base-2 logarithm of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ dst[i+63:i] := LOG(a[i+63:i]) / LOG(2.0)
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm256_log2_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <description>Compute the base-2 logarithm of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ dst[i+31:i] := LOG(a[i+31:i]) / LOG(2.0)
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm256_logb_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <description>Convert the exponent of each packed double-precision (64-bit) floating-point element in "a" to a double-precision floating-point number representing the integer exponent, and store the results in "dst". This intrinsic essentially calculates "floor(log2(x))" for each element.</description>
+ <operation>FOR j := 0 to 3
+ i := j*64
+ dst[i+63:i] := ConvertExpFP64(a[i+63:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm256_logb_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <description>Convert the exponent of each packed single-precision (32-bit) floating-point element in "a" to a single-precision floating-point number representing the integer exponent, and store the results in "dst". This intrinsic essentially calculates "floor(log2(x))" for each element.</description>
+ <operation>FOR j := 0 to 7
+ i := j*32
+ dst[i+31:i] := ConvertExpFP32(a[i+31:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm256_pow_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <parameter type="__m256d" varname="b" etype="FP64"/>
+ <description>Compute the exponential value of packed double-precision (64-bit) floating-point elements in "a" raised by packed elements in "b", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ dst[i+63:i] := POW(a[i+63:i], b[i+63:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm256_pow_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="__m256" varname="b" etype="FP32"/>
+ <description>Compute the exponential value of packed single-precision (32-bit) floating-point elements in "a" raised by packed elements in "b", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ dst[i+31:i] := POW(a[i+31:i], b[i+31:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm256_rem_epi8">
+ <type>Integer</type>
+ <CPUID>AVX</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI8"/>
+ <parameter type="__m256i" varname="a" etype="UI8"/>
+ <parameter type="__m256i" varname="b" etype="UI8"/>
+	<description>Divide packed 8-bit integers in "a" by packed elements in "b", and store the remainders as packed 8-bit integers in "dst".</description>
+ <operation>FOR j := 0 to 31
+ i := 8*j
+ dst[i+7:i] := REMAINDER(a[i+7:i] / b[i+7:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm256_rem_epi16">
+ <type>Integer</type>
+ <CPUID>AVX</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__m256i" varname="a" etype="UI16"/>
+ <parameter type="__m256i" varname="b" etype="UI16"/>
+	<description>Divide packed 16-bit integers in "a" by packed elements in "b", and store the remainders as packed 16-bit integers in "dst".</description>
+ <operation>FOR j := 0 to 15
+ i := 16*j
+ dst[i+15:i] := REMAINDER(a[i+15:i] / b[i+15:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm256_rem_epi32">
+ <type>Integer</type>
+ <CPUID>AVX</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="__m256i" varname="b" etype="UI32"/>
+ <description>Divide packed 32-bit integers in "a" by packed elements in "b", and store the remainders as packed 32-bit integers in "dst".</description>
+ <operation>FOR j := 0 to 7
+ i := 32*j
+ dst[i+31:i] := REMAINDER(a[i+31:i] / b[i+31:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm256_rem_epi64">
+ <type>Integer</type>
+ <CPUID>AVX</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="__m256i" varname="b" etype="UI64"/>
+ <description>Divide packed 64-bit integers in "a" by packed elements in "b", and store the remainders as packed 64-bit integers in "dst".</description>
+ <operation>FOR j := 0 to 3
+ i := 64*j
+ dst[i+63:i] := REMAINDER(a[i+63:i] / b[i+63:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm256_rem_epu8">
+ <type>Integer</type>
+ <CPUID>AVX</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI8"/>
+ <parameter type="__m256i" varname="a" etype="UI8"/>
+ <parameter type="__m256i" varname="b" etype="UI8"/>
+ <description>Divide packed unsigned 8-bit integers in "a" by packed elements in "b", and store the remainders as packed unsigned 8-bit integers in "dst".</description>
+ <operation>FOR j := 0 to 31
+ i := 8*j
+ dst[i+7:i] := REMAINDER(a[i+7:i] / b[i+7:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm256_rem_epu16">
+ <type>Integer</type>
+ <CPUID>AVX</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__m256i" varname="a" etype="UI16"/>
+ <parameter type="__m256i" varname="b" etype="UI16"/>
+ <description>Divide packed unsigned 16-bit integers in "a" by packed elements in "b", and store the remainders as packed unsigned 16-bit integers in "dst".</description>
+ <operation>FOR j := 0 to 15
+ i := 16*j
+ dst[i+15:i] := REMAINDER(a[i+15:i] / b[i+15:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm256_rem_epu32">
+ <type>Integer</type>
+ <CPUID>AVX</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="__m256i" varname="b" etype="UI32"/>
+ <description>Divide packed unsigned 32-bit integers in "a" by packed elements in "b", and store the remainders as packed unsigned 32-bit integers in "dst".</description>
+ <operation>FOR j := 0 to 7
+ i := 32*j
+ dst[i+31:i] := REMAINDER(a[i+31:i] / b[i+31:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm256_rem_epu64">
+ <type>Integer</type>
+ <CPUID>AVX</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="__m256i" varname="b" etype="UI64"/>
+ <description>Divide packed unsigned 64-bit integers in "a" by packed elements in "b", and store the remainders as packed unsigned 64-bit integers in "dst".</description>
+ <operation>FOR j := 0 to 3
+ i := 64*j
+ dst[i+63:i] := REMAINDER(a[i+63:i] / b[i+63:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm256_sin_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <description>Compute the sine of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ dst[i+63:i] := SIN(a[i+63:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm256_sin_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <description>Compute the sine of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ dst[i+31:i] := SIN(a[i+31:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm256_sincos_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d *" varname="mem_addr" etype="FP64" memwidth="256"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <description>Compute the sine and cosine of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, store the sine in "dst", and store the cosine into memory at "mem_addr".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ dst[i+63:i] := SIN(a[i+63:i])
+ MEM[mem_addr+i+63:mem_addr+i] := COS(a[i+63:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm256_sincos_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256 *" varname="mem_addr" etype="FP32" memwidth="256"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <description>Compute the sine and cosine of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, store the sine in "dst", and store the cosine into memory at "mem_addr".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ dst[i+31:i] := SIN(a[i+31:i])
+ MEM[mem_addr+i+31:mem_addr+i] := COS(a[i+31:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm256_sind_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <description>Compute the sine of packed double-precision (64-bit) floating-point elements in "a" expressed in degrees, and store the results in "dst".</description>
+ <operation>FOR j := 0 to 3
+ i := j*64
+ dst[i+63:i] := SIND(a[i+63:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm256_sind_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <description>Compute the sine of packed single-precision (32-bit) floating-point elements in "a" expressed in degrees, and store the results in "dst".</description>
+ <operation>FOR j := 0 to 7
+ i := j*32
+ dst[i+31:i] := SIND(a[i+31:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm256_sinh_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <description>Compute the hyperbolic sine of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ dst[i+63:i] := SINH(a[i+63:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm256_sinh_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <description>Compute the hyperbolic sine of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ dst[i+31:i] := SINH(a[i+31:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm256_svml_ceil_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <description>Round the packed double-precision (64-bit) floating-point elements in "a" up to an integer value, and store the results as packed double-precision floating-point elements in "dst". This intrinsic may generate the "roundpd"/"vroundpd" instruction.</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ dst[i+63:i] := CEIL(a[i+63:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm256_svml_ceil_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <description>Round the packed single-precision (32-bit) floating-point elements in "a" up to an integer value, and store the results as packed single-precision floating-point elements in "dst". This intrinsic may generate the "roundps"/"vroundps" instruction.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ dst[i+31:i] := CEIL(a[i+31:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm256_svml_floor_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <description>Round the packed double-precision (64-bit) floating-point elements in "a" down to an integer value, and store the results as packed double-precision floating-point elements in "dst". This intrinsic may generate the "roundpd"/"vroundpd" instruction.</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ dst[i+63:i] := FLOOR(a[i+63:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm256_svml_floor_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <description>Round the packed single-precision (32-bit) floating-point elements in "a" down to an integer value, and store the results as packed single-precision floating-point elements in "dst". This intrinsic may generate the "roundps"/"vroundps" instruction.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ dst[i+31:i] := FLOOR(a[i+31:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm256_svml_round_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <description>Round the packed double-precision (64-bit) floating-point elements in "a" to the nearest integer value, and store the results as packed double-precision floating-point elements in "dst". This intrinsic may generate the "roundpd"/"vroundpd" instruction.</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ dst[i+63:i] := ROUND(a[i+63:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm256_svml_round_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <description>Round the packed single-precision (32-bit) floating-point elements in "a" to the nearest integer value, and store the results as packed single-precision floating-point elements in "dst". This intrinsic may generate the "roundps"/"vroundps" instruction.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ dst[i+31:i] := ROUND(a[i+31:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm256_svml_sqrt_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <description>Compute the square root of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst". Note that this intrinsic is less efficient than "_mm_sqrt_pd".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ dst[i+63:i] := SQRT(a[i+63:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm256_svml_sqrt_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <description>Compute the square root of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". Note that this intrinsic is less efficient than "_mm_sqrt_ps".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ dst[i+31:i] := SQRT(a[i+31:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm256_tan_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <description>Compute the tangent of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ dst[i+63:i] := TAN(a[i+63:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm256_tan_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <description>Compute the tangent of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ dst[i+31:i] := TAN(a[i+31:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm256_tand_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <description>Compute the tangent of packed double-precision (64-bit) floating-point elements in "a" expressed in degrees, and store the results in "dst".</description>
+ <operation>FOR j := 0 to 3
+ i := j*64
+ dst[i+63:i] := TAND(a[i+63:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm256_tand_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <description>Compute the tangent of packed single-precision (32-bit) floating-point elements in "a" expressed in degrees, and store the results in "dst".</description>
+ <operation>FOR j := 0 to 7
+ i := j*32
+ dst[i+31:i] := TAND(a[i+31:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm256_tanh_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <description>Compute the hyperbolic tangent of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ dst[i+63:i] := TANH(a[i+63:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm256_tanh_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <description>Compute the hyperbolic tangent of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ dst[i+31:i] := TANH(a[i+31:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm256_trunc_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <description>Truncate the packed double-precision (64-bit) floating-point elements in "a", and store the results as packed double-precision floating-point elements in "dst". This intrinsic may generate the "roundpd"/"vroundpd" instruction.</description>
+ <operation>FOR j := 0 to 3
+ i := j*64
+ dst[i+63:i] := TRUNCATE(a[i+63:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm256_trunc_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <description>Truncate the packed single-precision (32-bit) floating-point elements in "a", and store the results as packed single-precision floating-point elements in "dst". This intrinsic may generate the "roundps"/"vroundps" instruction.</description>
+ <operation>FOR j := 0 to 7
+ i := j*32
+ dst[i+31:i] := TRUNCATE(a[i+31:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm256_udiv_epi32">
+ <type>Integer</type>
+ <CPUID>AVX</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="__m256i" varname="b" etype="UI32"/>
+ <description>Divide packed unsigned 32-bit integers in "a" by packed elements in "b", and store the truncated results in "dst".</description>
+ <operation>FOR j := 0 to 7
+ i := 32*j
+ dst[i+31:i] := TRUNCATE(a[i+31:i] / b[i+31:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm256_udivrem_epi32">
+ <type>Integer</type>
+ <CPUID>AVX</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i *" varname="mem_addr" etype="UI32" memwidth="256"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="__m256i" varname="b" etype="UI32"/>
+ <description>Divide packed unsigned 32-bit integers in "a" by packed elements in "b", store the truncated results in "dst", and store the remainders as packed unsigned 32-bit integers into memory at "mem_addr".</description>
+ <operation>FOR j := 0 to 7
+ i := 32*j
+ dst[i+31:i] := TRUNCATE(a[i+31:i] / b[i+31:i])
+ MEM[mem_addr+i+31:mem_addr+i] := REMAINDER(a[i+31:i] / b[i+31:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm256_urem_epi32">
+ <type>Integer</type>
+ <CPUID>AVX</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="__m256i" varname="b" etype="UI32"/>
+ <description>Divide packed unsigned 32-bit integers in "a" by packed elements in "b", and store the remainders as packed unsigned 32-bit integers in "dst".</description>
+ <operation>FOR j := 0 to 7
+ i := 32*j
+ dst[i+31:i] := REMAINDER(a[i+31:i] / b[i+31:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" vexEq="TRUE" name="_mm256_cvtss_f32">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Convert</category>
+ <return type="float" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <description>Copy the lower single-precision (32-bit) floating-point element of "a" to "dst".</description>
+ <operation>
+dst[31:0] := a[31:0]
+ </operation>
+ <instruction name="VMOVSS" form="m32, xmm" xed="VMOVSS_MEMd_XMMd"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" vexEq="TRUE" name="_mm256_cvtsd_f64">
+ <type>Floating Point</type>
+ <CPUID>AVX</CPUID>
+ <category>Convert</category>
+ <return type="double" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <description>Copy the lower double-precision (64-bit) floating-point element of "a" to "dst".</description>
+ <operation>
+dst[63:0] := a[63:0]
+ </operation>
+ <instruction name="VMOVSD" form="m64, xmm" xed="VMOVSD_MEMq_XMMq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX" vexEq="TRUE" name="_mm256_cvtsi256_si32">
+ <type>Integer</type>
+ <CPUID>AVX</CPUID>
+ <category>Convert</category>
+ <return type="int" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <description>Copy the lower 32-bit integer in "a" to "dst".</description>
+ <operation>
+dst[31:0] := a[31:0]
+ </operation>
+ <instruction name="VMOVD" form="r32, xmm" xed="VMOVD_GPR32d_XMMd"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" sequence="TRUE" name="_mm256_extract_epi8">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Swizzle</category>
+ <return type="int" varname="dst" etype="UI8"/>
+ <parameter type="__m256i" varname="a" etype="UI8"/>
+ <parameter type="const int" varname="index" etype="IMM" immwidth="5"/>
+ <description>Extract an 8-bit integer from "a", selected with "index", and store the result in "dst".</description>
+ <operation>
+dst[7:0] := (a[255:0] &gt;&gt; (index[4:0] * 8))[7:0]
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" sequence="TRUE" name="_mm256_extract_epi16">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Swizzle</category>
+ <return type="int" varname="dst" etype="UI16"/>
+ <parameter type="__m256i" varname="a" etype="UI16"/>
+ <parameter type="const int" varname="index" etype="IMM" immwidth="4"/>
+ <description>Extract a 16-bit integer from "a", selected with "index", and store the result in "dst".</description>
+ <operation>
+dst[15:0] := (a[255:0] &gt;&gt; (index[3:0] * 16))[15:0]
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_abs_epi8">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m256i" varname="dst" etype="UI8"/>
+ <parameter type="__m256i" varname="a" etype="SI8"/>
+ <description>Compute the absolute value of packed signed 8-bit integers in "a", and store the unsigned results in "dst".</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*8
+ dst[i+7:i] := ABS(a[i+7:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPABSB" form="ymm, ymm" xed="VPABSB_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_abs_epi16">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__m256i" varname="a" etype="SI16"/>
+ <description>Compute the absolute value of packed signed 16-bit integers in "a", and store the unsigned results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ dst[i+15:i] := ABS(a[i+15:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPABSW" form="ymm, ymm" xed="VPABSW_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_abs_epi32">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="a" etype="SI32"/>
+ <description>Compute the absolute value of packed signed 32-bit integers in "a", and store the unsigned results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ dst[i+31:i] := ABS(a[i+31:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPABSD" form="ymm, ymm" xed="VPABSD_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_add_epi8">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI8"/>
+ <parameter type="__m256i" varname="a" etype="UI8"/>
+ <parameter type="__m256i" varname="b" etype="UI8"/>
+ <description>Add packed 8-bit integers in "a" and "b", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*8
+ dst[i+7:i] := a[i+7:i] + b[i+7:i]
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPADDB" form="ymm, ymm, ymm" xed="VPADDB_YMMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_add_epi16">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__m256i" varname="a" etype="UI16"/>
+ <parameter type="__m256i" varname="b" etype="UI16"/>
+ <description>Add packed 16-bit integers in "a" and "b", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ dst[i+15:i] := a[i+15:i] + b[i+15:i]
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPADDW" form="ymm, ymm, ymm" xed="VPADDW_YMMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_add_epi32">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="__m256i" varname="b" etype="UI32"/>
+ <description>Add packed 32-bit integers in "a" and "b", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ dst[i+31:i] := a[i+31:i] + b[i+31:i]
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPADDD" form="ymm, ymm, ymm" xed="VPADDD_YMMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_add_epi64">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="__m256i" varname="b" etype="UI64"/>
+ <description>Add packed 64-bit integers in "a" and "b", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ dst[i+63:i] := a[i+63:i] + b[i+63:i]
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPADDQ" form="ymm, ymm, ymm" xed="VPADDQ_YMMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_adds_epi8">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI8"/>
+ <parameter type="__m256i" varname="a" etype="SI8"/>
+ <parameter type="__m256i" varname="b" etype="SI8"/>
+ <description>Add packed 8-bit integers in "a" and "b" using saturation, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*8
+ dst[i+7:i] := Saturate8( a[i+7:i] + b[i+7:i] )
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPADDSB" form="ymm, ymm, ymm" xed="VPADDSB_YMMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_adds_epi16">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__m256i" varname="a" etype="SI16"/>
+ <parameter type="__m256i" varname="b" etype="SI16"/>
+ <description>Add packed 16-bit integers in "a" and "b" using saturation, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ dst[i+15:i] := Saturate16( a[i+15:i] + b[i+15:i] )
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPADDSW" form="ymm, ymm, ymm" xed="VPADDSW_YMMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_adds_epu8">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI8"/>
+ <parameter type="__m256i" varname="a" etype="UI8"/>
+ <parameter type="__m256i" varname="b" etype="UI8"/>
+ <description>Add packed unsigned 8-bit integers in "a" and "b" using saturation, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*8
+ dst[i+7:i] := SaturateU8( a[i+7:i] + b[i+7:i] )
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPADDUSB" form="ymm, ymm, ymm" xed="VPADDUSB_YMMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_adds_epu16">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__m256i" varname="a" etype="UI16"/>
+ <parameter type="__m256i" varname="b" etype="UI16"/>
+ <description>Add packed unsigned 16-bit integers in "a" and "b" using saturation, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ dst[i+15:i] := SaturateU16( a[i+15:i] + b[i+15:i] )
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPADDUSW" form="ymm, ymm, ymm" xed="VPADDUSW_YMMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_alignr_epi8">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256i" varname="dst" etype="UI8"/>
+ <parameter type="__m256i" varname="a" etype="UI8"/>
+ <parameter type="__m256i" varname="b" etype="UI8"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="5"/>
+ <description>Concatenate pairs of 16-byte blocks in "a" and "b" into a 32-byte temporary result, shift the result right by "imm8" bytes, and store the low 16 bytes in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*128
+ tmp[255:0] := ((a[i+127:i] &lt;&lt; 128)[255:0] OR b[i+127:i]) &gt;&gt; (imm8*8)
+ dst[i+127:i] := tmp[127:0]
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPALIGNR" form="ymm, ymm, ymm, imm8" xed="VPALIGNR_YMMqq_YMMqq_YMMqq_IMMb"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_and_si256">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Logical</category>
+ <return type="__m256i" varname="dst" etype="M256"/>
+ <parameter type="__m256i" varname="a" etype="M256"/>
+ <parameter type="__m256i" varname="b" etype="M256"/>
+ <description>Compute the bitwise AND of 256 bits (representing integer data) in "a" and "b", and store the result in "dst".</description>
+ <operation>
+dst[255:0] := (a[255:0] AND b[255:0])
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPAND" form="ymm, ymm, ymm" xed="VPAND_YMMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_andnot_si256">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Logical</category>
+ <return type="__m256i" varname="dst" etype="M256"/>
+ <parameter type="__m256i" varname="a" etype="M256"/>
+ <parameter type="__m256i" varname="b" etype="M256"/>
+ <description>Compute the bitwise NOT of 256 bits (representing integer data) in "a" and then AND with "b", and store the result in "dst".</description>
+ <operation>
+dst[255:0] := ((NOT a[255:0]) AND b[255:0])
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPANDN" form="ymm, ymm, ymm" xed="VPANDN_YMMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_avg_epu8">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Probability/Statistics</category>
+ <return type="__m256i" varname="dst" etype="UI8"/>
+ <parameter type="__m256i" varname="a" etype="UI8"/>
+ <parameter type="__m256i" varname="b" etype="UI8"/>
+ <description>Average packed unsigned 8-bit integers in "a" and "b", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*8
+ dst[i+7:i] := (a[i+7:i] + b[i+7:i] + 1) &gt;&gt; 1
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPAVGB" form="ymm, ymm, ymm" xed="VPAVGB_YMMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_avg_epu16">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Probability/Statistics</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__m256i" varname="a" etype="UI16"/>
+ <parameter type="__m256i" varname="b" etype="UI16"/>
+ <description>Average packed unsigned 16-bit integers in "a" and "b", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ dst[i+15:i] := (a[i+15:i] + b[i+15:i] + 1) &gt;&gt; 1
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPAVGW" form="ymm, ymm, ymm" xed="VPAVGW_YMMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_blend_epi16">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Swizzle</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__m256i" varname="a" etype="UI16"/>
+ <parameter type="__m256i" varname="b" etype="UI16"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Blend packed 16-bit integers from "a" and "b" within 128-bit lanes using control mask "imm8", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ IF imm8[j%8]
+ dst[i+15:i] := b[i+15:i]
+ ELSE
+ dst[i+15:i] := a[i+15:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPBLENDW" form="ymm, ymm, ymm, imm8" xed="VPBLENDW_YMMqq_YMMqq_YMMqq_IMMb"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm_blend_epi32">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Swizzle</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="b" etype="UI32"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="4"/>
+ <description>Blend packed 32-bit integers from "a" and "b" using control mask "imm8", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF imm8[j]
+ dst[i+31:i] := b[i+31:i]
+ ELSE
+ dst[i+31:i] := a[i+31:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPBLENDD" form="xmm, xmm, xmm, imm8" xed="VPBLENDD_XMMdq_XMMdq_XMMdq_IMMb"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_blend_epi32">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Swizzle</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="__m256i" varname="b" etype="UI32"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Blend packed 32-bit integers from "a" and "b" using control mask "imm8", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF imm8[j]
+ dst[i+31:i] := b[i+31:i]
+ ELSE
+ dst[i+31:i] := a[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPBLENDD" form="ymm, ymm, ymm, imm8" xed="VPBLENDD_YMMqq_YMMqq_YMMqq_IMMb"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_blendv_epi8">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Swizzle</category>
+ <return type="__m256i" varname="dst" etype="UI8"/>
+ <parameter type="__m256i" varname="a" etype="UI8"/>
+ <parameter type="__m256i" varname="b" etype="UI8"/>
+ <parameter type="__m256i" varname="mask" etype="MASK"/>
+ <description>Blend packed 8-bit integers from "a" and "b" using "mask", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*8
+ IF mask[i+7]
+ dst[i+7:i] := b[i+7:i]
+ ELSE
+ dst[i+7:i] := a[i+7:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPBLENDVB" form="ymm, ymm, ymm, ymm" xed="VPBLENDVB_YMMqq_YMMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm_broadcastb_epi8">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Swizzle</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
+ <description>Broadcast the low packed 8-bit integer from "a" to all elements of "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*8
+ dst[i+7:i] := a[7:0]
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPBROADCASTB" form="xmm, xmm" xed="VPBROADCASTB_XMMdq_XMMb"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_broadcastb_epi8">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Swizzle</category>
+ <return type="__m256i" varname="dst" etype="UI8"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
+ <description>Broadcast the low packed 8-bit integer from "a" to all elements of "dst".</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*8
+ dst[i+7:i] := a[7:0]
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPBROADCASTB" form="ymm, xmm" xed="VPBROADCASTB_YMMqq_XMMb"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm_broadcastd_epi32">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Swizzle</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <description>Broadcast the low packed 32-bit integer from "a" to all elements of "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ dst[i+31:i] := a[31:0]
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPBROADCASTD" form="xmm, xmm" xed="VPBROADCASTD_XMMdq_XMMd"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_broadcastd_epi32">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Swizzle</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <description>Broadcast the low packed 32-bit integer from "a" to all elements of "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ dst[i+31:i] := a[31:0]
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPBROADCASTD" form="ymm, xmm" xed="VPBROADCASTD_YMMqq_XMMd"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm_broadcastq_epi64">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Swizzle</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <description>Broadcast the low packed 64-bit integer from "a" to all elements of "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ dst[i+63:i] := a[63:0]
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPBROADCASTQ" form="xmm, xmm" xed="VPBROADCASTQ_XMMdq_XMMq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_broadcastq_epi64">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Swizzle</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <description>Broadcast the low packed 64-bit integer from "a" to all elements of "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ dst[i+63:i] := a[63:0]
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPBROADCASTQ" form="ymm, xmm" xed="VPBROADCASTQ_YMMqq_XMMq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" vexEq="TRUE" name="_mm_broadcastsd_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX2</CPUID>
+ <category>Swizzle</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Broadcast the low double-precision (64-bit) floating-point element from "a" to all elements of "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ dst[i+63:i] := a[63:0]
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="MOVDDUP" form="xmm, xmm" xed="MOVDDUP_XMMdq_XMMq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_broadcastsd_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX2</CPUID>
+ <category>Swizzle</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Broadcast the low double-precision (64-bit) floating-point element from "a" to all elements of "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ dst[i+63:i] := a[63:0]
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VBROADCASTSD" form="ymm, xmm" xed="VBROADCASTSD_YMMqq_XMMdq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm_broadcastsi128_si256">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Swizzle</category>
+ <return type="__m256i" varname="dst" etype="M128"/>
+ <parameter type="__m128i" varname="a" etype="M128"/>
+ <description>Broadcast 128 bits of integer data from "a" to all 128-bit lanes in "dst".</description>
+ <operation>
+dst[127:0] := a[127:0]
+dst[255:128] := a[127:0]
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VBROADCASTI128" form="ymm, m128" xed="VBROADCASTI128_YMMqq_MEMdq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_broadcastsi128_si256">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Swizzle</category>
+ <return type="__m256i" varname="dst" etype="M128"/>
+ <parameter type="__m128i" varname="a" etype="M128"/>
+ <description>Broadcast 128 bits of integer data from "a" to all 128-bit lanes in "dst".</description>
+ <operation>
+dst[127:0] := a[127:0]
+dst[255:128] := a[127:0]
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VBROADCASTI128" form="ymm, m128" xed="VBROADCASTI128_YMMqq_MEMdq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm_broadcastss_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX2</CPUID>
+ <category>Swizzle</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Broadcast the low single-precision (32-bit) floating-point element from "a" to all elements of "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ dst[i+31:i] := a[31:0]
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VBROADCASTSS" form="xmm, xmm" xed="VBROADCASTSS_XMMdq_XMMdq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_broadcastss_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX2</CPUID>
+ <category>Swizzle</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Broadcast the low single-precision (32-bit) floating-point element from "a" to all elements of "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ dst[i+31:i] := a[31:0]
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VBROADCASTSS" form="ymm, xmm" xed="VBROADCASTSS_YMMqq_XMMdq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm_broadcastw_epi16">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Swizzle</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <description>Broadcast the low packed 16-bit integer from "a" to all elements of "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ dst[i+15:i] := a[15:0]
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPBROADCASTW" form="xmm, xmm" xed="VPBROADCASTW_XMMdq_XMMw"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_broadcastw_epi16">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Swizzle</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <description>Broadcast the low packed 16-bit integer from "a" to all elements of "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ dst[i+15:i] := a[15:0]
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPBROADCASTW" form="ymm, xmm" xed="VPBROADCASTW_YMMqq_XMMw"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_cmpeq_epi8">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Compare</category>
+ <return type="__m256i" varname="dst" etype="UI8"/>
+ <parameter type="__m256i" varname="a" etype="UI8"/>
+ <parameter type="__m256i" varname="b" etype="UI8"/>
+ <description>Compare packed 8-bit integers in "a" and "b" for equality, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*8
+ dst[i+7:i] := ( a[i+7:i] == b[i+7:i] ) ? 0xFF : 0
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPCMPEQB" form="ymm, ymm, ymm" xed="VPCMPEQB_YMMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_cmpeq_epi16">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Compare</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__m256i" varname="a" etype="UI16"/>
+ <parameter type="__m256i" varname="b" etype="UI16"/>
+ <description>Compare packed 16-bit integers in "a" and "b" for equality, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ dst[i+15:i] := ( a[i+15:i] == b[i+15:i] ) ? 0xFFFF : 0
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPCMPEQW" form="ymm, ymm, ymm" xed="VPCMPEQW_YMMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_cmpeq_epi32">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Compare</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="__m256i" varname="b" etype="UI32"/>
+ <description>Compare packed 32-bit integers in "a" and "b" for equality, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ dst[i+31:i] := ( a[i+31:i] == b[i+31:i] ) ? 0xFFFFFFFF : 0
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPCMPEQD" form="ymm, ymm, ymm" xed="VPCMPEQD_YMMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_cmpeq_epi64">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Compare</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="__m256i" varname="b" etype="UI64"/>
+ <description>Compare packed 64-bit integers in "a" and "b" for equality, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ dst[i+63:i] := ( a[i+63:i] == b[i+63:i] ) ? 0xFFFFFFFFFFFFFFFF : 0
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPCMPEQQ" form="ymm, ymm, ymm" xed="VPCMPEQQ_YMMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_cmpgt_epi8">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Compare</category>
+ <return type="__m256i" varname="dst" etype="UI8"/>
+ <parameter type="__m256i" varname="a" etype="SI8"/>
+ <parameter type="__m256i" varname="b" etype="SI8"/>
+ <description>Compare packed signed 8-bit integers in "a" and "b" for greater-than, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*8
+ dst[i+7:i] := ( a[i+7:i] &gt; b[i+7:i] ) ? 0xFF : 0
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPCMPGTB" form="ymm, ymm, ymm" xed="VPCMPGTB_YMMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_cmpgt_epi16">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Compare</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__m256i" varname="a" etype="SI16"/>
+ <parameter type="__m256i" varname="b" etype="SI16"/>
+ <description>Compare packed signed 16-bit integers in "a" and "b" for greater-than, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ dst[i+15:i] := ( a[i+15:i] &gt; b[i+15:i] ) ? 0xFFFF : 0
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPCMPGTW" form="ymm, ymm, ymm" xed="VPCMPGTW_YMMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_cmpgt_epi32">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Compare</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="a" etype="SI32"/>
+ <parameter type="__m256i" varname="b" etype="SI32"/>
+ <description>Compare packed signed 32-bit integers in "a" and "b" for greater-than, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ dst[i+31:i] := ( a[i+31:i] &gt; b[i+31:i] ) ? 0xFFFFFFFF : 0
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPCMPGTD" form="ymm, ymm, ymm" xed="VPCMPGTD_YMMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_cmpgt_epi64">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Compare</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m256i" varname="a" etype="SI64"/>
+ <parameter type="__m256i" varname="b" etype="SI64"/>
+ <description>Compare packed signed 64-bit integers in "a" and "b" for greater-than, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ dst[i+63:i] := ( a[i+63:i] &gt; b[i+63:i] ) ? 0xFFFFFFFFFFFFFFFF : 0
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPCMPGTQ" form="ymm, ymm, ymm" xed="VPCMPGTQ_YMMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_cvtepi16_epi32">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Convert</category>
+ <return type="__m256i" varname="dst" etype="SI32"/>
+ <parameter type="__m128i" varname="a" etype="SI16"/>
+ <description>Sign extend packed 16-bit integers in "a" to packed 32-bit integers, and store the results in "dst".</description>
+ <operation>
+FOR j:= 0 to 7
+ i := 32*j
+ k := 16*j
+ dst[i+31:i] := SignExtend32(a[k+15:k])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMOVSXWD" form="ymm, xmm" xed="VPMOVSXWD_YMMqq_XMMdq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_cvtepi16_epi64">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Convert</category>
+ <return type="__m256i" varname="dst" etype="SI64"/>
+ <parameter type="__m128i" varname="a" etype="SI16"/>
+ <description>Sign extend packed 16-bit integers in "a" to packed 64-bit integers, and store the results in "dst".</description>
+ <operation>
+FOR j:= 0 to 3
+ i := 64*j
+ k := 16*j
+ dst[i+63:i] := SignExtend64(a[k+15:k])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMOVSXWQ" form="ymm, xmm" xed="VPMOVSXWQ_YMMqq_XMMq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_cvtepi32_epi64">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Convert</category>
+ <return type="__m256i" varname="dst" etype="SI64"/>
+ <parameter type="__m128i" varname="a" etype="SI32"/>
+ <description>Sign extend packed 32-bit integers in "a" to packed 64-bit integers, and store the results in "dst".</description>
+ <operation>
+FOR j:= 0 to 3
+ i := 64*j
+ k := 32*j
+ dst[i+63:i] := SignExtend64(a[k+31:k])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMOVSXDQ" form="ymm, xmm" xed="VPMOVSXDQ_YMMqq_XMMdq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_cvtepi8_epi16">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Convert</category>
+ <return type="__m256i" varname="dst" etype="SI16"/>
+ <parameter type="__m128i" varname="a" etype="SI8"/>
+ <description>Sign extend packed 8-bit integers in "a" to packed 16-bit integers, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*8
+ l := j*16
+ dst[l+15:l] := SignExtend16(a[i+7:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMOVSXBW" form="ymm, xmm" xed="VPMOVSXBW_YMMqq_XMMdq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_cvtepi8_epi32">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Convert</category>
+ <return type="__m256i" varname="dst" etype="SI32"/>
+ <parameter type="__m128i" varname="a" etype="SI8"/>
+ <description>Sign extend packed 8-bit integers in "a" to packed 32-bit integers, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := 32*j
+ k := 8*j
+ dst[i+31:i] := SignExtend32(a[k+7:k])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMOVSXBD" form="ymm, xmm" xed="VPMOVSXBD_YMMqq_XMMq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_cvtepi8_epi64">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Convert</category>
+ <return type="__m256i" varname="dst" etype="SI64"/>
+ <parameter type="__m128i" varname="a" etype="SI8"/>
+ <description>Sign extend packed 8-bit integers in the low 4 bytes of "a" to packed 64-bit integers, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := 64*j
+ k := 8*j
+ dst[i+63:i] := SignExtend64(a[k+7:k])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMOVSXBQ" form="ymm, xmm" xed="VPMOVSXBQ_YMMqq_XMMd"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_cvtepu16_epi32">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Convert</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <description>Zero extend packed unsigned 16-bit integers in "a" to packed 32-bit integers, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := 32*j
+ k := 16*j
+ dst[i+31:i] := ZeroExtend32(a[k+15:k])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMOVZXWD" form="ymm, xmm" xed="VPMOVZXWD_YMMqq_XMMdq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_cvtepu16_epi64">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Convert</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <description>Zero extend packed unsigned 16-bit integers in "a" to packed 64-bit integers, and store the results in "dst".</description>
+ <operation>
+FOR j:= 0 to 3
+ i := 64*j
+ k := 16*j
+ dst[i+63:i] := ZeroExtend64(a[k+15:k])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMOVZXWQ" form="ymm, xmm" xed="VPMOVZXWQ_YMMqq_XMMq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_cvtepu32_epi64">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Convert</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <description>Zero extend packed unsigned 32-bit integers in "a" to packed 64-bit integers, and store the results in "dst".</description>
+ <operation>
+FOR j:= 0 to 3
+ i := 64*j
+ k := 32*j
+ dst[i+63:i] := ZeroExtend64(a[k+31:k])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMOVZXDQ" form="ymm, xmm" xed="VPMOVZXDQ_YMMqq_XMMdq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_cvtepu8_epi16">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Convert</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
+ <description>Zero extend packed unsigned 8-bit integers in "a" to packed 16-bit integers, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*8
+ l := j*16
+ dst[l+15:l] := ZeroExtend16(a[i+7:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMOVZXBW" form="ymm, xmm" xed="VPMOVZXBW_YMMqq_XMMdq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_cvtepu8_epi32">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Convert</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
+ <description>Zero extend packed unsigned 8-bit integers in "a" to packed 32-bit integers, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := 32*j
+ k := 8*j
+ dst[i+31:i] := ZeroExtend32(a[k+7:k])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMOVZXBD" form="ymm, xmm" xed="VPMOVZXBD_YMMqq_XMMq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_cvtepu8_epi64">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Convert</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
+ <description>Zero extend packed unsigned 8-bit integers in the low 4 bytes of "a" to packed 64-bit integers, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := 64*j
+ k := 8*j
+ dst[i+63:i] := ZeroExtend64(a[k+7:k])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMOVZXBQ" form="ymm, xmm" xed="VPMOVZXBQ_YMMqq_XMMd"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_extracti128_si256">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Swizzle</category>
+ <return type="__m128i" varname="dst" etype="M128"/>
+ <parameter type="__m256i" varname="a" etype="M128"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="1"/>
+ <description>Extract 128 bits (composed of integer data) from "a", selected with "imm8", and store the result in "dst".</description>
+ <operation>
+CASE imm8[0] OF
+0: dst[127:0] := a[127:0]
+1: dst[127:0] := a[255:128]
+ESAC
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VEXTRACTI128" form="xmm, ymm, imm8" xed="VEXTRACTI128_XMMdq_YMMqq_IMMb"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_hadd_epi16">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__m256i" varname="a" etype="UI16"/>
+ <parameter type="__m256i" varname="b" etype="UI16"/>
+ <description>Horizontally add adjacent pairs of 16-bit integers in "a" and "b", and pack the signed 16-bit results in "dst".</description>
+ <operation>
+dst[15:0] := a[31:16] + a[15:0]
+dst[31:16] := a[63:48] + a[47:32]
+dst[47:32] := a[95:80] + a[79:64]
+dst[63:48] := a[127:112] + a[111:96]
+dst[79:64] := b[31:16] + b[15:0]
+dst[95:80] := b[63:48] + b[47:32]
+dst[111:96] := b[95:80] + b[79:64]
+dst[127:112] := b[127:112] + b[111:96]
+dst[143:128] := a[159:144] + a[143:128]
+dst[159:144] := a[191:176] + a[175:160]
+dst[175:160] := a[223:208] + a[207:192]
+dst[191:176] := a[255:240] + a[239:224]
+dst[207:192] := b[159:144] + b[143:128]
+dst[223:208] := b[191:176] + b[175:160]
+dst[239:224] := b[223:208] + b[207:192]
+dst[255:240] := b[255:240] + b[239:224]
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPHADDW" form="ymm, ymm, ymm" xed="VPHADDW_YMMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_hadd_epi32">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="__m256i" varname="b" etype="UI32"/>
+ <description>Horizontally add adjacent pairs of 32-bit integers in "a" and "b", and pack the signed 32-bit results in "dst".</description>
+ <operation>
+dst[31:0] := a[63:32] + a[31:0]
+dst[63:32] := a[127:96] + a[95:64]
+dst[95:64] := b[63:32] + b[31:0]
+dst[127:96] := b[127:96] + b[95:64]
+dst[159:128] := a[191:160] + a[159:128]
+dst[191:160] := a[255:224] + a[223:192]
+dst[223:192] := b[191:160] + b[159:128]
+dst[255:224] := b[255:224] + b[223:192]
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPHADDD" form="ymm, ymm, ymm" xed="VPHADDD_YMMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_hadds_epi16">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__m256i" varname="a" etype="SI16"/>
+ <parameter type="__m256i" varname="b" etype="SI16"/>
+ <description>Horizontally add adjacent pairs of signed 16-bit integers in "a" and "b" using saturation, and pack the signed 16-bit results in "dst".</description>
+ <operation>
+dst[15:0] := Saturate16(a[31:16] + a[15:0])
+dst[31:16] := Saturate16(a[63:48] + a[47:32])
+dst[47:32] := Saturate16(a[95:80] + a[79:64])
+dst[63:48] := Saturate16(a[127:112] + a[111:96])
+dst[79:64] := Saturate16(b[31:16] + b[15:0])
+dst[95:80] := Saturate16(b[63:48] + b[47:32])
+dst[111:96] := Saturate16(b[95:80] + b[79:64])
+dst[127:112] := Saturate16(b[127:112] + b[111:96])
+dst[143:128] := Saturate16(a[159:144] + a[143:128])
+dst[159:144] := Saturate16(a[191:176] + a[175:160])
+dst[175:160] := Saturate16(a[223:208] + a[207:192])
+dst[191:176] := Saturate16(a[255:240] + a[239:224])
+dst[207:192] := Saturate16(b[159:144] + b[143:128])
+dst[223:208] := Saturate16(b[191:176] + b[175:160])
+dst[239:224] := Saturate16(b[223:208] + b[207:192])
+dst[255:240] := Saturate16(b[255:240] + b[239:224])
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPHADDSW" form="ymm, ymm, ymm" xed="VPHADDSW_YMMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_hsub_epi16">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__m256i" varname="a" etype="UI16"/>
+ <parameter type="__m256i" varname="b" etype="UI16"/>
+ <description>Horizontally subtract adjacent pairs of 16-bit integers in "a" and "b", and pack the signed 16-bit results in "dst".</description>
+ <operation>
+dst[15:0] := a[15:0] - a[31:16]
+dst[31:16] := a[47:32] - a[63:48]
+dst[47:32] := a[79:64] - a[95:80]
+dst[63:48] := a[111:96] - a[127:112]
+dst[79:64] := b[15:0] - b[31:16]
+dst[95:80] := b[47:32] - b[63:48]
+dst[111:96] := b[79:64] - b[95:80]
+dst[127:112] := b[111:96] - b[127:112]
+dst[143:128] := a[143:128] - a[159:144]
+dst[159:144] := a[175:160] - a[191:176]
+dst[175:160] := a[207:192] - a[223:208]
+dst[191:176] := a[239:224] - a[255:240]
+dst[207:192] := b[143:128] - b[159:144]
+dst[223:208] := b[175:160] - b[191:176]
+dst[239:224] := b[207:192] - b[223:208]
+dst[255:240] := b[239:224] - b[255:240]
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPHSUBW" form="ymm, ymm, ymm" xed="VPHSUBW_YMMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_hsub_epi32">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="__m256i" varname="b" etype="UI32"/>
+ <description>Horizontally subtract adjacent pairs of 32-bit integers in "a" and "b", and pack the signed 32-bit results in "dst".</description>
+ <operation>
+dst[31:0] := a[31:0] - a[63:32]
+dst[63:32] := a[95:64] - a[127:96]
+dst[95:64] := b[31:0] - b[63:32]
+dst[127:96] := b[95:64] - b[127:96]
+dst[159:128] := a[159:128] - a[191:160]
+dst[191:160] := a[223:192] - a[255:224]
+dst[223:192] := b[159:128] - b[191:160]
+dst[255:224] := b[223:192] - b[255:224]
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPHSUBD" form="ymm, ymm, ymm" xed="VPHSUBD_YMMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_hsubs_epi16">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__m256i" varname="a" etype="SI16"/>
+ <parameter type="__m256i" varname="b" etype="SI16"/>
+ <description>Horizontally subtract adjacent pairs of signed 16-bit integers in "a" and "b" using saturation, and pack the signed 16-bit results in "dst".</description>
+ <operation>
+dst[15:0] := Saturate16(a[15:0] - a[31:16])
+dst[31:16] := Saturate16(a[47:32] - a[63:48])
+dst[47:32] := Saturate16(a[79:64] - a[95:80])
+dst[63:48] := Saturate16(a[111:96] - a[127:112])
+dst[79:64] := Saturate16(b[15:0] - b[31:16])
+dst[95:80] := Saturate16(b[47:32] - b[63:48])
+dst[111:96] := Saturate16(b[79:64] - b[95:80])
+dst[127:112] := Saturate16(b[111:96] - b[127:112])
+dst[143:128] := Saturate16(a[143:128] - a[159:144])
+dst[159:144] := Saturate16(a[175:160] - a[191:176])
+dst[175:160] := Saturate16(a[207:192] - a[223:208])
+dst[191:176] := Saturate16(a[239:224] - a[255:240])
+dst[207:192] := Saturate16(b[143:128] - b[159:144])
+dst[223:208] := Saturate16(b[175:160] - b[191:176])
+dst[239:224] := Saturate16(b[207:192] - b[223:208])
+dst[255:240] := Saturate16(b[239:224] - b[255:240])
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPHSUBSW" form="ymm, ymm, ymm" xed="VPHSUBSW_YMMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm_i32gather_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX2</CPUID>
+ <category>Load</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="double const*" varname="base_addr" etype="FP64"/>
+ <parameter type="__m128i" varname="vindex" etype="SI32"/>
+ <parameter type="const int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <description>Gather double-precision (64-bit) floating-point elements from memory using 32-bit indices. 64-bit elements are loaded from addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst". "scale" should be 1, 2, 4 or 8.</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ m := j*32
+ addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
+ dst[i+63:i] := MEM[addr+63:addr]
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VGATHERDPD" form="xmm, vm32x, xmm" xed="VGATHERDPD_XMMf64_MEMf64_XMMi64_VL128"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_i32gather_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX2</CPUID>
+ <category>Load</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="double const*" varname="base_addr" etype="FP64"/>
+ <parameter type="__m128i" varname="vindex" etype="SI32"/>
+ <parameter type="const int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <description>Gather double-precision (64-bit) floating-point elements from memory using 32-bit indices. 64-bit elements are loaded from addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst". "scale" should be 1, 2, 4 or 8.</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ m := j*32
+ addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
+ dst[i+63:i] := MEM[addr+63:addr]
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VGATHERDPD" form="ymm, vm32x, ymm" xed="VGATHERDPD_YMMf64_MEMf64_YMMi64_VL256"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm_i32gather_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX2</CPUID>
+ <category>Load</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="float const*" varname="base_addr" etype="FP32"/>
+ <parameter type="__m128i" varname="vindex" etype="SI32"/>
+ <parameter type="const int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <description>Gather single-precision (32-bit) floating-point elements from memory using 32-bit indices. 32-bit elements are loaded from addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst". "scale" should be 1, 2, 4 or 8.</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ m := j*32
+ addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
+ dst[i+31:i] := MEM[addr+31:addr]
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VGATHERDPS" form="xmm, vm32x, xmm" xed="VGATHERDPS_XMMf32_MEMf32_XMMi32_VL128"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_i32gather_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX2</CPUID>
+ <category>Load</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="float const*" varname="base_addr" etype="FP32"/>
+ <parameter type="__m256i" varname="vindex" etype="SI32"/>
+ <parameter type="const int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <description>Gather single-precision (32-bit) floating-point elements from memory using 32-bit indices. 32-bit elements are loaded from addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst". "scale" should be 1, 2, 4 or 8.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ m := j*32
+ addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
+ dst[i+31:i] := MEM[addr+31:addr]
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VGATHERDPS" form="ymm, vm32x, ymm" xed="VGATHERDPS_YMMf32_MEMf32_YMMi32_VL256"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm_i32gather_epi32">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Load</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="int const*" varname="base_addr" etype="UI32"/>
+ <parameter type="__m128i" varname="vindex" etype="SI32"/>
+ <parameter type="const int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <description>Gather 32-bit integers from memory using 32-bit indices. 32-bit elements are loaded from addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst". "scale" should be 1, 2, 4 or 8.</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ m := j*32
+ addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
+ dst[i+31:i] := MEM[addr+31:addr]
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPGATHERDD" form="xmm, vm32x, xmm" xed="VPGATHERDD_XMMu32_MEMd_XMMi32_VL128"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_i32gather_epi32">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Load</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="int const*" varname="base_addr" etype="UI32"/>
+ <parameter type="__m256i" varname="vindex" etype="SI32"/>
+ <parameter type="const int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <description>Gather 32-bit integers from memory using 32-bit indices. 32-bit elements are loaded from addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst". "scale" should be 1, 2, 4 or 8.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ m := j*32
+ addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
+ dst[i+31:i] := MEM[addr+31:addr]
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPGATHERDD" form="ymm, vm32x, ymm" xed="VPGATHERDD_YMMu32_MEMd_YMMi32_VL256"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm_i32gather_epi64">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Load</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__int64 const*" varname="base_addr" etype="UI64"/>
+ <parameter type="__m128i" varname="vindex" etype="SI32"/>
+ <parameter type="const int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <description>Gather 64-bit integers from memory using 32-bit indices. 64-bit elements are loaded from addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst". "scale" should be 1, 2, 4 or 8.</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ m := j*32
+ addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
+ dst[i+63:i] := MEM[addr+63:addr]
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPGATHERDQ" form="xmm, vm32x, xmm" xed="VPGATHERDQ_XMMu64_MEMq_XMMi64_VL128"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_i32gather_epi64">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Load</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__int64 const*" varname="base_addr" etype="UI64"/>
+ <parameter type="__m128i" varname="vindex" etype="SI32"/>
+ <parameter type="const int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <description>Gather 64-bit integers from memory using 32-bit indices. 64-bit elements are loaded from addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst". "scale" should be 1, 2, 4 or 8.</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ m := j*32
+ addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
+ dst[i+63:i] := MEM[addr+63:addr]
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPGATHERDQ" form="ymm, vm32x, ymm" xed="VPGATHERDQ_YMMu64_MEMq_YMMi64_VL256"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm_i64gather_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX2</CPUID>
+ <category>Load</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="double const*" varname="base_addr" etype="FP64"/>
+ <parameter type="__m128i" varname="vindex" etype="SI64"/>
+ <parameter type="const int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <description>Gather double-precision (64-bit) floating-point elements from memory using 64-bit indices. 64-bit elements are loaded from addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst". "scale" should be 1, 2, 4 or 8.</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ m := j*64
+ addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8
+ dst[i+63:i] := MEM[addr+63:addr]
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VGATHERQPD" form="xmm, vm64x, xmm" xed="VGATHERQPD_XMMf64_MEMf64_XMMi64_VL128"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_i64gather_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX2</CPUID>
+ <category>Load</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="double const*" varname="base_addr" etype="FP64"/>
+ <parameter type="__m256i" varname="vindex" etype="SI64"/>
+ <parameter type="const int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <description>Gather double-precision (64-bit) floating-point elements from memory using 64-bit indices. 64-bit elements are loaded from addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst". "scale" should be 1, 2, 4 or 8.</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ m := j*64
+ addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8
+ dst[i+63:i] := MEM[addr+63:addr]
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VGATHERQPD" form="ymm, vm64x, ymm" xed="VGATHERQPD_YMMf64_MEMf64_YMMi64_VL256"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm_i64gather_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX2</CPUID>
+ <category>Load</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="float const*" varname="base_addr" etype="FP32"/>
+ <parameter type="__m128i" varname="vindex" etype="SI64"/>
+ <parameter type="const int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <description>Gather single-precision (32-bit) floating-point elements from memory using 64-bit indices. 32-bit elements are loaded from addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst". "scale" should be 1, 2, 4 or 8.</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*32
+ m := j*64
+ addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8
+ dst[i+31:i] := MEM[addr+31:addr]
+ENDFOR
+dst[MAX:64] := 0
+ </operation>
+ <instruction name="VGATHERQPS" form="xmm, vm64x, xmm" xed="VGATHERQPS_XMMf32_MEMf32_XMMi32_VL128"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_i64gather_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX2</CPUID>
+ <category>Load</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="float const*" varname="base_addr" etype="FP32"/>
+ <parameter type="__m256i" varname="vindex" etype="SI64"/>
+ <parameter type="const int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <description>Gather single-precision (32-bit) floating-point elements from memory using 64-bit indices. 32-bit elements are loaded from addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst". "scale" should be 1, 2, 4 or 8.</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ m := j*64
+ addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8
+ dst[i+31:i] := MEM[addr+31:addr]
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VGATHERQPS" form="xmm, vm64y, xmm" xed="VGATHERQPS_XMMf32_MEMf32_XMMi32_VL256"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm_i64gather_epi32">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Load</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="int const*" varname="base_addr" etype="UI32"/>
+ <parameter type="__m128i" varname="vindex" etype="SI64"/>
+ <parameter type="const int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <description>Gather 32-bit integers from memory using 64-bit indices. 32-bit elements are loaded from addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst". "scale" should be 1, 2, 4 or 8.</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*32
+ m := j*64
+ addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8
+ dst[i+31:i] := MEM[addr+31:addr]
+ENDFOR
+dst[MAX:64] := 0
+ </operation>
+ <instruction name="VPGATHERQD" form="xmm, vm64x, xmm" xed="VPGATHERQD_XMMu32_MEMd_XMMi32_VL128"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_i64gather_epi32">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Load</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="int const*" varname="base_addr" etype="UI32"/>
+ <parameter type="__m256i" varname="vindex" etype="SI64"/>
+ <parameter type="const int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <description>Gather 32-bit integers from memory using 64-bit indices. 32-bit elements are loaded from addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst". "scale" should be 1, 2, 4 or 8.</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ m := j*64
+ addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8
+ dst[i+31:i] := MEM[addr+31:addr]
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPGATHERQD" form="xmm, vm64y, xmm" xed="VPGATHERQD_XMMu32_MEMd_XMMi32_VL256"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm_i64gather_epi64">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Load</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__int64 const*" varname="base_addr" etype="UI64"/>
+ <parameter type="__m128i" varname="vindex" etype="SI64"/>
+ <parameter type="const int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <description>Gather 64-bit integers from memory using 64-bit indices. 64-bit elements are loaded from addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst". "scale" should be 1, 2, 4 or 8.</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ m := j*64
+ addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8
+ dst[i+63:i] := MEM[addr+63:addr]
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPGATHERQQ" form="xmm, vm64x, xmm" xed="VPGATHERQQ_XMMu64_MEMq_XMMi64_VL128"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_i64gather_epi64">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Load</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__int64 const*" varname="base_addr" etype="UI64"/>
+ <parameter type="__m256i" varname="vindex" etype="SI64"/>
+ <parameter type="const int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <description>Gather 64-bit integers from memory using 64-bit indices. 64-bit elements are loaded from addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst". "scale" should be 1, 2, 4 or 8.</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ m := j*64
+ addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8
+ dst[i+63:i] := MEM[addr+63:addr]
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPGATHERQQ" form="ymm, vm64x, ymm" xed="VPGATHERQQ_YMMu64_MEMq_YMMi64_VL256"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_inserti128_si256">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Swizzle</category>
+ <return type="__m256i" varname="dst" etype="M128"/>
+ <parameter type="__m256i" varname="a" etype="M256"/>
+ <parameter type="__m128i" varname="b" etype="M128"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="1"/>
+ <description>Copy "a" to "dst", then insert 128 bits (composed of integer data) from "b" into "dst" at the location specified by "imm8".</description>
+ <operation>
+dst[255:0] := a[255:0]
+CASE (imm8[0]) OF
+0: dst[127:0] := b[127:0]
+1: dst[255:128] := b[127:0]
+ESAC
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VINSERTI128" form="ymm, ymm, xmm, imm8" xed="VINSERTI128_YMMqq_YMMqq_XMMdq_IMMb"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_madd_epi16">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="SI32"/>
+ <parameter type="__m256i" varname="a" etype="SI16"/>
+ <parameter type="__m256i" varname="b" etype="SI16"/>
+ <description>Multiply packed signed 16-bit integers in "a" and "b", producing intermediate signed 32-bit integers. Horizontally add adjacent pairs of intermediate 32-bit integers, and pack the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ dst[i+31:i] := SignExtend32(a[i+31:i+16]*b[i+31:i+16]) + SignExtend32(a[i+15:i]*b[i+15:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMADDWD" form="ymm, ymm, ymm" xed="VPMADDWD_YMMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_maddubs_epi16">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="SI16"/>
+ <parameter type="__m256i" varname="a" etype="UI8"/>
+ <parameter type="__m256i" varname="b" etype="SI8"/>
+ <description>Vertically multiply each unsigned 8-bit integer from "a" with the corresponding signed 8-bit integer from "b", producing intermediate signed 16-bit integers. Horizontally add adjacent pairs of intermediate signed 16-bit integers, and pack the saturated results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ dst[i+15:i] := Saturate16( a[i+15:i+8]*b[i+15:i+8] + a[i+7:i]*b[i+7:i] )
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMADDUBSW" form="ymm, ymm, ymm" xed="VPMADDUBSW_YMMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm_mask_i32gather_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX2</CPUID>
+ <category>Load</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="src" etype="FP64"/>
+ <parameter type="double const*" varname="base_addr" etype="FP64"/>
+ <parameter type="__m128i" varname="vindex" etype="SI32"/>
+ <parameter type="__m128d" varname="mask" etype="MASK"/>
+ <parameter type="const int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <description>Gather double-precision (64-bit) floating-point elements from memory using 32-bit indices. 64-bit elements are loaded from addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst" using "mask" (elements are copied from "src" when the highest bit is not set in the corresponding element). "scale" should be 1, 2, 4 or 8.</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ m := j*32
+ IF mask[i+63]
+ addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
+ dst[i+63:i] := MEM[addr+63:addr]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+mask[MAX:128] := 0
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VGATHERDPD" form="xmm, vm32x, xmm" xed="VGATHERDPD_XMMf64_MEMf64_XMMi64_VL128"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_mask_i32gather_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX2</CPUID>
+ <category>Load</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="src" etype="FP64"/>
+ <parameter type="double const*" varname="base_addr" etype="FP64"/>
+ <parameter type="__m128i" varname="vindex" etype="SI32"/>
+ <parameter type="__m256d" varname="mask" etype="MASK"/>
+ <parameter type="const int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <description>Gather double-precision (64-bit) floating-point elements from memory using 32-bit indices. 64-bit elements are loaded from addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst" using "mask" (elements are copied from "src" when the highest bit is not set in the corresponding element). "scale" should be 1, 2, 4 or 8.</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ m := j*32
+ IF mask[i+63]
+ addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
+ dst[i+63:i] := MEM[addr+63:addr]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+mask[MAX:256] := 0
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VGATHERDPD" form="ymm, vm32x, ymm" xed="VGATHERDPD_YMMf64_MEMf64_YMMi64_VL256"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm_mask_i32gather_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX2</CPUID>
+ <category>Load</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="src" etype="FP32"/>
+ <parameter type="float const*" varname="base_addr" etype="FP32"/>
+ <parameter type="__m128i" varname="vindex" etype="SI32"/>
+ <parameter type="__m128" varname="mask" etype="MASK"/>
+ <parameter type="const int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <description>Gather single-precision (32-bit) floating-point elements from memory using 32-bit indices. 32-bit elements are loaded from addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst" using "mask" (elements are copied from "src" when the highest bit is not set in the corresponding element). "scale" should be 1, 2, 4 or 8.</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ m := j*32
+ IF mask[i+31]
+ addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
+ dst[i+31:i] := MEM[addr+31:addr]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+mask[MAX:128] := 0
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VGATHERDPS" form="xmm, vm32x, xmm" xed="VGATHERDPS_XMMf32_MEMf32_XMMi32_VL128"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_mask_i32gather_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX2</CPUID>
+ <category>Load</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="src" etype="FP32"/>
+ <parameter type="float const*" varname="base_addr" etype="FP32"/>
+ <parameter type="__m256i" varname="vindex" etype="SI32"/>
+ <parameter type="__m256" varname="mask" etype="MASK"/>
+ <parameter type="const int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <description>Gather single-precision (32-bit) floating-point elements from memory using 32-bit indices. 32-bit elements are loaded from addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst" using "mask" (elements are copied from "src" when the highest bit is not set in the corresponding element). "scale" should be 1, 2, 4 or 8.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ m := j*32
+ IF mask[i+31]
+ addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
+ dst[i+31:i] := MEM[addr+31:addr]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+mask[MAX:256] := 0
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VGATHERDPS" form="ymm, vm32x, ymm" xed="VGATHERDPS_YMMf32_MEMf32_YMMi32_VL256"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm_mask_i32gather_epi32">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Load</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="src" etype="UI32"/>
+ <parameter type="int const*" varname="base_addr" etype="UI32"/>
+ <parameter type="__m128i" varname="vindex" etype="SI32"/>
+ <parameter type="__m128i" varname="mask" etype="MASK"/>
+ <parameter type="const int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <description>Gather 32-bit integers from memory using 32-bit indices. 32-bit elements are loaded from addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst" using "mask" (elements are copied from "src" when the highest bit is not set in the corresponding element). "scale" should be 1, 2, 4 or 8.</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ m := j*32
+ IF mask[i+31]
+ addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
+ dst[i+31:i] := MEM[addr+31:addr]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+mask[MAX:128] := 0
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPGATHERDD" form="xmm, vm32x, xmm" xed="VPGATHERDD_XMMu32_MEMd_XMMi32_VL128"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_mask_i32gather_epi32">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Load</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="src" etype="UI32"/>
+ <parameter type="int const*" varname="base_addr" etype="UI32"/>
+ <parameter type="__m256i" varname="vindex" etype="SI32"/>
+ <parameter type="__m256i" varname="mask" etype="MASK"/>
+ <parameter type="const int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <description>Gather 32-bit integers from memory using 32-bit indices. 32-bit elements are loaded from addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst" using "mask" (elements are copied from "src" when the highest bit is not set in the corresponding element). "scale" should be 1, 2, 4 or 8.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ m := j*32
+ IF mask[i+31]
+ addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
+ dst[i+31:i] := MEM[addr+31:addr]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+mask[MAX:256] := 0
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPGATHERDD" form="ymm, vm32x, ymm" xed="VPGATHERDD_YMMu32_MEMd_YMMi32_VL256"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm_mask_i32gather_epi64">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Load</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m128i" varname="src" etype="UI64"/>
+ <parameter type="__int64 const*" varname="base_addr" etype="UI64"/>
+ <parameter type="__m128i" varname="vindex" etype="SI32"/>
+ <parameter type="__m128i" varname="mask" etype="MASK"/>
+ <parameter type="const int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <description>Gather 64-bit integers from memory using 32-bit indices. 64-bit elements are loaded from addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst" using "mask" (elements are copied from "src" when the highest bit is not set in the corresponding element). "scale" should be 1, 2, 4 or 8.</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ m := j*32
+ IF mask[i+63]
+ addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
+ dst[i+63:i] := MEM[addr+63:addr]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+mask[MAX:128] := 0
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPGATHERDQ" form="xmm, vm32x, xmm" xed="VPGATHERDQ_XMMu64_MEMq_XMMi64_VL128"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_mask_i32gather_epi64">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Load</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m256i" varname="src" etype="UI64"/>
+ <parameter type="__int64 const*" varname="base_addr" etype="UI64"/>
+ <parameter type="__m128i" varname="vindex" etype="SI32"/>
+ <parameter type="__m256i" varname="mask" etype="MASK"/>
+ <parameter type="const int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <description>Gather 64-bit integers from memory using 32-bit indices. 64-bit elements are loaded from addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst" using "mask" (elements are copied from "src" when the highest bit is not set in the corresponding element). "scale" should be 1, 2, 4 or 8.</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ m := j*32
+ IF mask[i+63]
+ addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
+ dst[i+63:i] := MEM[addr+63:addr]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+mask[MAX:256] := 0
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPGATHERDQ" form="ymm, vm32x, ymm" xed="VPGATHERDQ_YMMu64_MEMq_YMMi64_VL256"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm_mask_i64gather_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX2</CPUID>
+ <category>Load</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="src" etype="FP64"/>
+ <parameter type="double const*" varname="base_addr" etype="FP64"/>
+ <parameter type="__m128i" varname="vindex" etype="SI64"/>
+ <parameter type="__m128d" varname="mask" etype="MASK"/>
+ <parameter type="const int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <description>Gather double-precision (64-bit) floating-point elements from memory using 64-bit indices. 64-bit elements are loaded from addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst" using "mask" (elements are copied from "src" when the highest bit is not set in the corresponding element). "scale" should be 1, 2, 4 or 8.</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ m := j*64
+ IF mask[i+63]
+ addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8
+ dst[i+63:i] := MEM[addr+63:addr]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+mask[MAX:128] := 0
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VGATHERQPD" form="xmm, vm64x, xmm" xed="VGATHERQPD_XMMf64_MEMf64_XMMi64_VL128"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_mask_i64gather_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX2</CPUID>
+ <category>Load</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="src" etype="FP64"/>
+ <parameter type="double const*" varname="base_addr" etype="FP64"/>
+ <parameter type="__m256i" varname="vindex" etype="SI64"/>
+ <parameter type="__m256d" varname="mask" etype="MASK"/>
+ <parameter type="const int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <description>Gather double-precision (64-bit) floating-point elements from memory using 64-bit indices. 64-bit elements are loaded from addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst" using "mask" (elements are copied from "src" when the highest bit is not set in the corresponding element). "scale" should be 1, 2, 4 or 8.</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ m := j*64
+ IF mask[i+63]
+ addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8
+ dst[i+63:i] := MEM[addr+63:addr]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+mask[MAX:256] := 0
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VGATHERQPD" form="ymm, vm64x, ymm" xed="VGATHERQPD_YMMf64_MEMf64_YMMi64_VL256"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm_mask_i64gather_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX2</CPUID>
+ <category>Load</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="src" etype="FP32"/>
+ <parameter type="float const*" varname="base_addr" etype="FP32"/>
+ <parameter type="__m128i" varname="vindex" etype="SI64"/>
+ <parameter type="__m128" varname="mask" etype="MASK"/>
+ <parameter type="const int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <description>Gather single-precision (32-bit) floating-point elements from memory using 64-bit indices. 32-bit elements are loaded from addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst" using "mask" (elements are copied from "src" when the highest bit is not set in the corresponding element). "scale" should be 1, 2, 4 or 8.</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*32
+ m := j*64
+ IF mask[i+31]
+ addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8
+ dst[i+31:i] := MEM[addr+31:addr]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+mask[MAX:64] := 0
+dst[MAX:64] := 0
+ </operation>
+ <instruction name="VGATHERQPS" form="xmm, vm64x, xmm" xed="VGATHERQPS_XMMf32_MEMf32_XMMi32_VL128"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_mask_i64gather_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX2</CPUID>
+ <category>Load</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="src" etype="FP32"/>
+ <parameter type="float const*" varname="base_addr" etype="FP32"/>
+ <parameter type="__m256i" varname="vindex" etype="SI64"/>
+ <parameter type="__m128" varname="mask" etype="MASK"/>
+ <parameter type="const int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <description>Gather single-precision (32-bit) floating-point elements from memory using 64-bit indices. 32-bit elements are loaded from addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst" using "mask" (elements are copied from "src" when the highest bit is not set in the corresponding element). "scale" should be 1, 2, 4 or 8.</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ m := j*64
+ IF mask[i+31]
+ addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8
+ dst[i+31:i] := MEM[addr+31:addr]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+mask[MAX:128] := 0
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VGATHERQPS" form="xmm, vm64y, xmm" xed="VGATHERQPS_XMMf32_MEMf32_XMMi32_VL256"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm_mask_i64gather_epi32">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Load</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="src" etype="UI32"/>
+ <parameter type="int const*" varname="base_addr" etype="UI32"/>
+ <parameter type="__m128i" varname="vindex" etype="SI64"/>
+ <parameter type="__m128i" varname="mask" etype="MASK"/>
+ <parameter type="const int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <description>Gather 32-bit integers from memory using 64-bit indices. 32-bit elements are loaded from addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst" using "mask" (elements are copied from "src" when the highest bit is not set in the corresponding element). "scale" should be 1, 2, 4 or 8.</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*32
+ m := j*64
+ IF mask[i+31]
+ addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8
+ dst[i+31:i] := MEM[addr+31:addr]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+mask[MAX:64] := 0
+dst[MAX:64] := 0
+ </operation>
+ <instruction name="VPGATHERQD" form="xmm, vm64x, xmm" xed="VPGATHERQD_XMMu32_MEMd_XMMi32_VL128"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_mask_i64gather_epi32">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Load</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="src" etype="UI32"/>
+ <parameter type="int const*" varname="base_addr" etype="UI32"/>
+ <parameter type="__m256i" varname="vindex" etype="SI64"/>
+ <parameter type="__m128i" varname="mask" etype="MASK"/>
+ <parameter type="const int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <description>Gather 32-bit integers from memory using 64-bit indices. 32-bit elements are loaded from addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst" using "mask" (elements are copied from "src" when the highest bit is not set in the corresponding element). "scale" should be 1, 2, 4 or 8.</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ m := j*64
+ IF mask[i+31]
+ addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8
+ dst[i+31:i] := MEM[addr+31:addr]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+mask[MAX:128] := 0
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPGATHERQD" form="xmm, vm64y, xmm" xed="VPGATHERQD_XMMu32_MEMd_XMMi32_VL256"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm_mask_i64gather_epi64">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Load</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m128i" varname="src" etype="UI64"/>
+ <parameter type="__int64 const*" varname="base_addr" etype="UI64"/>
+ <parameter type="__m128i" varname="vindex" etype="SI64"/>
+ <parameter type="__m128i" varname="mask" etype="MASK"/>
+ <parameter type="const int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <description>Gather 64-bit integers from memory using 64-bit indices. 64-bit elements are loaded from addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst" using "mask" (elements are copied from "src" when the highest bit is not set in the corresponding element). "scale" should be 1, 2, 4 or 8.</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ m := j*64
+ IF mask[i+63]
+ addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8
+ dst[i+63:i] := MEM[addr+63:addr]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+mask[MAX:128] := 0
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPGATHERQQ" form="xmm, vm64x, xmm" xed="VPGATHERQQ_XMMu64_MEMq_XMMi64_VL128"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_mask_i64gather_epi64">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Load</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m256i" varname="src" etype="UI64"/>
+ <parameter type="__int64 const*" varname="base_addr" etype="UI64"/>
+ <parameter type="__m256i" varname="vindex" etype="SI64"/>
+ <parameter type="__m256i" varname="mask" etype="MASK"/>
+ <parameter type="const int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <description>Gather 64-bit integers from memory using 64-bit indices. 64-bit elements are loaded from addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst" using "mask" (elements are copied from "src" when the highest bit is not set in the corresponding element). "scale" should be 1, 2, 4 or 8.</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ m := j*64
+ IF mask[i+63]
+ addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8
+ dst[i+63:i] := MEM[addr+63:addr]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+mask[MAX:256] := 0
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPGATHERQQ" form="ymm, vm64x, ymm" xed="VPGATHERQQ_YMMu64_MEMq_YMMi64_VL256"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm_maskload_epi32">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Load</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="int const*" varname="mem_addr" etype="UI32" memwidth="128"/>
+ <parameter type="__m128i" varname="mask" etype="MASK"/>
+ <description>Load packed 32-bit integers from memory into "dst" using "mask" (elements are zeroed out when the highest bit is not set in the corresponding element).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF mask[i+31]
+ dst[i+31:i] := MEM[mem_addr+i+31:mem_addr+i]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMASKMOVD" form="xmm, xmm, m128" xed="VPMASKMOVD_XMMdq_XMMdq_MEMdq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_maskload_epi32">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Load</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="int const*" varname="mem_addr" etype="UI32" memwidth="256"/>
+ <parameter type="__m256i" varname="mask" etype="MASK"/>
+ <description>Load packed 32-bit integers from memory into "dst" using "mask" (elements are zeroed out when the highest bit is not set in the corresponding element).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF mask[i+31]
+ dst[i+31:i] := MEM[mem_addr+i+31:mem_addr+i]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMASKMOVD" form="ymm, ymm, m256" xed="VPMASKMOVD_YMMqq_YMMqq_MEMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm_maskload_epi64">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Load</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__int64 const*" varname="mem_addr" etype="UI64" memwidth="128"/>
+ <parameter type="__m128i" varname="mask" etype="MASK"/>
+ <description>Load packed 64-bit integers from memory into "dst" using "mask" (elements are zeroed out when the highest bit is not set in the corresponding element).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF mask[i+63]
+ dst[i+63:i] := MEM[mem_addr+i+63:mem_addr+i]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMASKMOVQ" form="xmm, xmm, m128" xed="VPMASKMOVQ_XMMdq_XMMdq_MEMdq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_maskload_epi64">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Load</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__int64 const*" varname="mem_addr" etype="UI64" memwidth="256"/>
+ <parameter type="__m256i" varname="mask" etype="MASK"/>
+ <description>Load packed 64-bit integers from memory into "dst" using "mask" (elements are zeroed out when the highest bit is not set in the corresponding element).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF mask[i+63]
+ dst[i+63:i] := MEM[mem_addr+i+63:mem_addr+i]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMASKMOVQ" form="ymm, ymm, m256" xed="VPMASKMOVQ_YMMqq_YMMqq_MEMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm_maskstore_epi32">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="int*" varname="mem_addr" etype="UI32" memwidth="128"/>
+ <parameter type="__m128i" varname="mask" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <description>Store packed 32-bit integers from "a" into memory using "mask" (elements are not stored when the highest bit is not set in the corresponding element).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF mask[i+31]
+ MEM[mem_addr+i+31:mem_addr+i] := a[i+31:i]
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VPMASKMOVD" form="m128, xmm, xmm" xed="VPMASKMOVD_MEMdq_XMMdq_XMMdq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_maskstore_epi32">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="int*" varname="mem_addr" etype="UI32" memwidth="256"/>
+ <parameter type="__m256i" varname="mask" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <description>Store packed 32-bit integers from "a" into memory using "mask" (elements are not stored when the highest bit is not set in the corresponding element).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF mask[i+31]
+ MEM[mem_addr+i+31:mem_addr+i] := a[i+31:i]
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VPMASKMOVD" form="m256, ymm, ymm" xed="VPMASKMOVD_MEMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm_maskstore_epi64">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="__int64*" varname="mem_addr" etype="UI64" memwidth="128"/>
+ <parameter type="__m128i" varname="mask" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <description>Store packed 64-bit integers from "a" into memory using "mask" (elements are not stored when the highest bit is not set in the corresponding element).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF mask[i+63]
+ MEM[mem_addr+i+63:mem_addr+i] := a[i+63:i]
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VPMASKMOVQ" form="m128, xmm, xmm" xed="VPMASKMOVQ_MEMdq_XMMdq_XMMdq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_maskstore_epi64">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="__int64*" varname="mem_addr" etype="UI64" memwidth="256"/>
+ <parameter type="__m256i" varname="mask" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <description>Store packed 64-bit integers from "a" into memory using "mask" (elements are not stored when the highest bit is not set in the corresponding element).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF mask[i+63]
+ MEM[mem_addr+i+63:mem_addr+i] := a[i+63:i]
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VPMASKMOVQ" form="m256, ymm, ymm" xed="VPMASKMOVQ_MEMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_max_epi8">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m256i" varname="dst" etype="UI8"/>
+ <parameter type="__m256i" varname="a" etype="SI8"/>
+ <parameter type="__m256i" varname="b" etype="SI8"/>
+ <description>Compare packed signed 8-bit integers in "a" and "b", and store packed maximum values in "dst".</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*8
+ dst[i+7:i] := MAX(a[i+7:i], b[i+7:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMAXSB" form="ymm, ymm, ymm" xed="VPMAXSB_YMMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_max_epi16">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__m256i" varname="a" etype="SI16"/>
+ <parameter type="__m256i" varname="b" etype="SI16"/>
+ <description>Compare packed signed 16-bit integers in "a" and "b", and store packed maximum values in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ dst[i+15:i] := MAX(a[i+15:i], b[i+15:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMAXSW" form="ymm, ymm, ymm" xed="VPMAXSW_YMMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_max_epi32">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="a" etype="SI32"/>
+ <parameter type="__m256i" varname="b" etype="SI32"/>
+ <description>Compare packed signed 32-bit integers in "a" and "b", and store packed maximum values in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ dst[i+31:i] := MAX(a[i+31:i], b[i+31:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMAXSD" form="ymm, ymm, ymm" xed="VPMAXSD_YMMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_max_epu8">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m256i" varname="dst" etype="UI8"/>
+ <parameter type="__m256i" varname="a" etype="UI8"/>
+ <parameter type="__m256i" varname="b" etype="UI8"/>
+ <description>Compare packed unsigned 8-bit integers in "a" and "b", and store packed maximum values in "dst".</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*8
+ dst[i+7:i] := MAX(a[i+7:i], b[i+7:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMAXUB" form="ymm, ymm, ymm" xed="VPMAXUB_YMMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_max_epu16">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__m256i" varname="a" etype="UI16"/>
+ <parameter type="__m256i" varname="b" etype="UI16"/>
+ <description>Compare packed unsigned 16-bit integers in "a" and "b", and store packed maximum values in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ dst[i+15:i] := MAX(a[i+15:i], b[i+15:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMAXUW" form="ymm, ymm, ymm" xed="VPMAXUW_YMMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_max_epu32">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="__m256i" varname="b" etype="UI32"/>
+ <description>Compare packed unsigned 32-bit integers in "a" and "b", and store packed maximum values in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ dst[i+31:i] := MAX(a[i+31:i], b[i+31:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMAXUD" form="ymm, ymm, ymm" xed="VPMAXUD_YMMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_min_epi8">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m256i" varname="dst" etype="UI8"/>
+ <parameter type="__m256i" varname="a" etype="SI8"/>
+ <parameter type="__m256i" varname="b" etype="SI8"/>
+ <description>Compare packed signed 8-bit integers in "a" and "b", and store packed minimum values in "dst".</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*8
+ dst[i+7:i] := MIN(a[i+7:i], b[i+7:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMINSB" form="ymm, ymm, ymm" xed="VPMINSB_YMMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_min_epi16">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__m256i" varname="a" etype="SI16"/>
+ <parameter type="__m256i" varname="b" etype="SI16"/>
+ <description>Compare packed signed 16-bit integers in "a" and "b", and store packed minimum values in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ dst[i+15:i] := MIN(a[i+15:i], b[i+15:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMINSW" form="ymm, ymm, ymm" xed="VPMINSW_YMMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_min_epi32">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="a" etype="SI32"/>
+ <parameter type="__m256i" varname="b" etype="SI32"/>
+ <description>Compare packed signed 32-bit integers in "a" and "b", and store packed minimum values in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ dst[i+31:i] := MIN(a[i+31:i], b[i+31:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMINSD" form="ymm, ymm, ymm" xed="VPMINSD_YMMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_min_epu8">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m256i" varname="dst" etype="UI8"/>
+ <parameter type="__m256i" varname="a" etype="UI8"/>
+ <parameter type="__m256i" varname="b" etype="UI8"/>
+ <description>Compare packed unsigned 8-bit integers in "a" and "b", and store packed minimum values in "dst".</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*8
+ dst[i+7:i] := MIN(a[i+7:i], b[i+7:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMINUB" form="ymm, ymm, ymm" xed="VPMINUB_YMMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_min_epu16">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__m256i" varname="a" etype="UI16"/>
+ <parameter type="__m256i" varname="b" etype="UI16"/>
+ <description>Compare packed unsigned 16-bit integers in "a" and "b", and store packed minimum values in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ dst[i+15:i] := MIN(a[i+15:i], b[i+15:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMINUW" form="ymm, ymm, ymm" xed="VPMINUW_YMMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_min_epu32">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="__m256i" varname="b" etype="UI32"/>
+ <description>Compare packed unsigned 32-bit integers in "a" and "b", and store packed minimum values in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ dst[i+31:i] := MIN(a[i+31:i], b[i+31:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMINUD" form="ymm, ymm, ymm" xed="VPMINUD_YMMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_movemask_epi8">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Miscellaneous</category>
+ <return type="int" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="a" etype="UI8"/>
+ <description>Create mask from the most significant bit of each 8-bit element in "a", and store the result in "dst".</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*8
+ dst[j] := a[i+7]
+ENDFOR
+ </operation>
+ <instruction name="VPMOVMSKB" form="r32, ymm" xed="VPMOVMSKB_GPR32d_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_mpsadbw_epu8">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256i" varname="dst" etype="UI8"/>
+ <parameter type="__m256i" varname="a" etype="UI8"/>
+ <parameter type="__m256i" varname="b" etype="UI8"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Compute the sum of absolute differences (SADs) of quadruplets of unsigned 8-bit integers in "a" compared to those in "b", and store the 16-bit results in "dst".
+ Eight SADs are performed for each 128-bit lane using one quadruplet from "b" and eight quadruplets from "a". One quadruplet is selected from "b" starting at on the offset specified in "imm8". Eight quadruplets are formed from sequential 8-bit integers selected from "a" starting at the offset specified in "imm8".</description>
+ <operation>
+DEFINE MPSADBW(a[127:0], b[127:0], imm8[2:0]) {
+ a_offset := imm8[2]*32
+ b_offset := imm8[1:0]*32
+ FOR j := 0 to 7
+ i := j*8
+ k := a_offset+i
+ l := b_offset
+ tmp[i*2+15:i*2] := ABS(Signed(a[k+7:k] - b[l+7:l])) + ABS(Signed(a[k+15:k+8] - b[l+15:l+8])) + \
+ ABS(Signed(a[k+23:k+16] - b[l+23:l+16])) + ABS(Signed(a[k+31:k+24] - b[l+31:l+24]))
+ ENDFOR
+ RETURN tmp[127:0]
+}
+dst[127:0] := MPSADBW(a[127:0], b[127:0], imm8[2:0])
+dst[255:128] := MPSADBW(a[255:128], b[255:128], imm8[5:3])
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VMPSADBW" form="ymm, ymm, ymm, imm8" xed="VMPSADBW_YMMqq_YMMqq_YMMqq_IMMb"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_mul_epi32">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="SI64"/>
+ <parameter type="__m256i" varname="a" etype="SI32"/>
+ <parameter type="__m256i" varname="b" etype="SI32"/>
+ <description>Multiply the low signed 32-bit integers from each packed 64-bit element in "a" and "b", and store the signed 64-bit results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ dst[i+63:i] := SignExtend64(a[i+31:i]) * SignExtend64(b[i+31:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMULDQ" form="ymm, ymm, ymm" xed="VPMULDQ_YMMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_mul_epu32">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="__m256i" varname="b" etype="UI32"/>
+ <description>Multiply the low unsigned 32-bit integers from each packed 64-bit element in "a" and "b", and store the unsigned 64-bit results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ dst[i+63:i] := a[i+31:i] * b[i+31:i]
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMULUDQ" form="ymm, ymm, ymm" xed="VPMULUDQ_YMMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_mulhi_epi16">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__m256i" varname="a" etype="SI16"/>
+ <parameter type="__m256i" varname="b" etype="SI16"/>
+ <description>Multiply the packed signed 16-bit integers in "a" and "b", producing intermediate 32-bit integers, and store the high 16 bits of the intermediate integers in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ tmp[31:0] := SignExtend32(a[i+15:i]) * SignExtend32(b[i+15:i])
+ dst[i+15:i] := tmp[31:16]
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMULHW" form="ymm, ymm, ymm" xed="VPMULHW_YMMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_mulhi_epu16">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__m256i" varname="a" etype="UI16"/>
+ <parameter type="__m256i" varname="b" etype="UI16"/>
+ <description>Multiply the packed unsigned 16-bit integers in "a" and "b", producing intermediate 32-bit integers, and store the high 16 bits of the intermediate integers in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ tmp[31:0] := a[i+15:i] * b[i+15:i]
+ dst[i+15:i] := tmp[31:16]
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMULHUW" form="ymm, ymm, ymm" xed="VPMULHUW_YMMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_mulhrs_epi16">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__m256i" varname="a" etype="SI16"/>
+ <parameter type="__m256i" varname="b" etype="SI16"/>
+ <description>Multiply packed signed 16-bit integers in "a" and "b", producing intermediate signed 32-bit integers. Truncate each intermediate integer to the 18 most significant bits, round by adding 1, and store bits [16:1] to "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ tmp[31:0] := ((SignExtend32(a[i+15:i]) * SignExtend32(b[i+15:i])) &gt;&gt; 14) + 1
+ dst[i+15:i] := tmp[16:1]
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMULHRSW" form="ymm, ymm, ymm" xed="VPMULHRSW_YMMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_mullo_epi16">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__m256i" varname="a" etype="SI16"/>
+ <parameter type="__m256i" varname="b" etype="SI16"/>
+ <description>Multiply the packed signed 16-bit integers in "a" and "b", producing intermediate 32-bit integers, and store the low 16 bits of the intermediate integers in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ tmp[31:0] := SignExtend32(a[i+15:i]) * SignExtend32(b[i+15:i])
+ dst[i+15:i] := tmp[15:0]
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMULLW" form="ymm, ymm, ymm" xed="VPMULLW_YMMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_mullo_epi32">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="a" etype="SI32"/>
+ <parameter type="__m256i" varname="b" etype="SI32"/>
+ <description>Multiply the packed signed 32-bit integers in "a" and "b", producing intermediate 64-bit integers, and store the low 32 bits of the intermediate integers in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ tmp[63:0] := a[i+31:i] * b[i+31:i]
+ dst[i+31:i] := tmp[31:0]
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMULLD" form="ymm, ymm, ymm" xed="VPMULLD_YMMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_or_si256">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Logical</category>
+ <return type="__m256i" varname="dst" etype="M256"/>
+ <parameter type="__m256i" varname="a" etype="M256"/>
+ <parameter type="__m256i" varname="b" etype="M256"/>
+ <description>Compute the bitwise OR of 256 bits (representing integer data) in "a" and "b", and store the result in "dst".</description>
+ <operation>
+dst[255:0] := (a[255:0] OR b[255:0])
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPOR" form="ymm, ymm, ymm" xed="VPOR_YMMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_packs_epi16">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256i" varname="dst" etype="SI8"/>
+ <parameter type="__m256i" varname="a" etype="SI16"/>
+ <parameter type="__m256i" varname="b" etype="SI16"/>
+ <description>Convert packed signed 16-bit integers from "a" and "b" to packed 8-bit integers using signed saturation, and store the results in "dst".</description>
+ <operation>
+dst[7:0] := Saturate8(a[15:0])
+dst[15:8] := Saturate8(a[31:16])
+dst[23:16] := Saturate8(a[47:32])
+dst[31:24] := Saturate8(a[63:48])
+dst[39:32] := Saturate8(a[79:64])
+dst[47:40] := Saturate8(a[95:80])
+dst[55:48] := Saturate8(a[111:96])
+dst[63:56] := Saturate8(a[127:112])
+dst[71:64] := Saturate8(b[15:0])
+dst[79:72] := Saturate8(b[31:16])
+dst[87:80] := Saturate8(b[47:32])
+dst[95:88] := Saturate8(b[63:48])
+dst[103:96] := Saturate8(b[79:64])
+dst[111:104] := Saturate8(b[95:80])
+dst[119:112] := Saturate8(b[111:96])
+dst[127:120] := Saturate8(b[127:112])
+dst[135:128] := Saturate8(a[143:128])
+dst[143:136] := Saturate8(a[159:144])
+dst[151:144] := Saturate8(a[175:160])
+dst[159:152] := Saturate8(a[191:176])
+dst[167:160] := Saturate8(a[207:192])
+dst[175:168] := Saturate8(a[223:208])
+dst[183:176] := Saturate8(a[239:224])
+dst[191:184] := Saturate8(a[255:240])
+dst[199:192] := Saturate8(b[143:128])
+dst[207:200] := Saturate8(b[159:144])
+dst[215:208] := Saturate8(b[175:160])
+dst[223:216] := Saturate8(b[191:176])
+dst[231:224] := Saturate8(b[207:192])
+dst[239:232] := Saturate8(b[223:208])
+dst[247:240] := Saturate8(b[239:224])
+dst[255:248] := Saturate8(b[255:240])
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPACKSSWB" form="ymm, ymm, ymm" xed="VPACKSSWB_YMMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_packs_epi32">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256i" varname="dst" etype="SI16"/>
+ <parameter type="__m256i" varname="a" etype="SI32"/>
+ <parameter type="__m256i" varname="b" etype="SI32"/>
+ <description>Convert packed signed 32-bit integers from "a" and "b" to packed 16-bit integers using signed saturation, and store the results in "dst".</description>
+ <operation>
+dst[15:0] := Saturate16(a[31:0])
+dst[31:16] := Saturate16(a[63:32])
+dst[47:32] := Saturate16(a[95:64])
+dst[63:48] := Saturate16(a[127:96])
+dst[79:64] := Saturate16(b[31:0])
+dst[95:80] := Saturate16(b[63:32])
+dst[111:96] := Saturate16(b[95:64])
+dst[127:112] := Saturate16(b[127:96])
+dst[143:128] := Saturate16(a[159:128])
+dst[159:144] := Saturate16(a[191:160])
+dst[175:160] := Saturate16(a[223:192])
+dst[191:176] := Saturate16(a[255:224])
+dst[207:192] := Saturate16(b[159:128])
+dst[223:208] := Saturate16(b[191:160])
+dst[239:224] := Saturate16(b[223:192])
+dst[255:240] := Saturate16(b[255:224])
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPACKSSDW" form="ymm, ymm, ymm" xed="VPACKSSDW_YMMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_packus_epi16">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256i" varname="dst" etype="UI8"/>
+ <parameter type="__m256i" varname="a" etype="SI16"/>
+ <parameter type="__m256i" varname="b" etype="SI16"/>
+ <description>Convert packed signed 16-bit integers from "a" and "b" to packed 8-bit integers using unsigned saturation, and store the results in "dst".</description>
+ <operation>
+dst[7:0] := SaturateU8(a[15:0])
+dst[15:8] := SaturateU8(a[31:16])
+dst[23:16] := SaturateU8(a[47:32])
+dst[31:24] := SaturateU8(a[63:48])
+dst[39:32] := SaturateU8(a[79:64])
+dst[47:40] := SaturateU8(a[95:80])
+dst[55:48] := SaturateU8(a[111:96])
+dst[63:56] := SaturateU8(a[127:112])
+dst[71:64] := SaturateU8(b[15:0])
+dst[79:72] := SaturateU8(b[31:16])
+dst[87:80] := SaturateU8(b[47:32])
+dst[95:88] := SaturateU8(b[63:48])
+dst[103:96] := SaturateU8(b[79:64])
+dst[111:104] := SaturateU8(b[95:80])
+dst[119:112] := SaturateU8(b[111:96])
+dst[127:120] := SaturateU8(b[127:112])
+dst[135:128] := SaturateU8(a[143:128])
+dst[143:136] := SaturateU8(a[159:144])
+dst[151:144] := SaturateU8(a[175:160])
+dst[159:152] := SaturateU8(a[191:176])
+dst[167:160] := SaturateU8(a[207:192])
+dst[175:168] := SaturateU8(a[223:208])
+dst[183:176] := SaturateU8(a[239:224])
+dst[191:184] := SaturateU8(a[255:240])
+dst[199:192] := SaturateU8(b[143:128])
+dst[207:200] := SaturateU8(b[159:144])
+dst[215:208] := SaturateU8(b[175:160])
+dst[223:216] := SaturateU8(b[191:176])
+dst[231:224] := SaturateU8(b[207:192])
+dst[239:232] := SaturateU8(b[223:208])
+dst[247:240] := SaturateU8(b[239:224])
+dst[255:248] := SaturateU8(b[255:240])
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPACKUSWB" form="ymm, ymm, ymm" xed="VPACKUSWB_YMMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_packus_epi32">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__m256i" varname="a" etype="SI32"/>
+ <parameter type="__m256i" varname="b" etype="SI32"/>
+ <description>Convert packed signed 32-bit integers from "a" and "b" to packed 16-bit integers using unsigned saturation, and store the results in "dst".</description>
+ <operation>
+dst[15:0] := SaturateU16(a[31:0])
+dst[31:16] := SaturateU16(a[63:32])
+dst[47:32] := SaturateU16(a[95:64])
+dst[63:48] := SaturateU16(a[127:96])
+dst[79:64] := SaturateU16(b[31:0])
+dst[95:80] := SaturateU16(b[63:32])
+dst[111:96] := SaturateU16(b[95:64])
+dst[127:112] := SaturateU16(b[127:96])
+dst[143:128] := SaturateU16(a[159:128])
+dst[159:144] := SaturateU16(a[191:160])
+dst[175:160] := SaturateU16(a[223:192])
+dst[191:176] := SaturateU16(a[255:224])
+dst[207:192] := SaturateU16(b[159:128])
+dst[223:208] := SaturateU16(b[191:160])
+dst[239:224] := SaturateU16(b[223:192])
+dst[255:240] := SaturateU16(b[255:224])
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPACKUSDW" form="ymm, ymm, ymm" xed="VPACKUSDW_YMMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_permute2x128_si256">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Swizzle</category>
+ <return type="__m256i" varname="dst" etype="M256"/>
+ <parameter type="__m256i" varname="a" etype="M256"/>
+ <parameter type="__m256i" varname="b" etype="M256"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shuffle 128-bits (composed of integer data) selected by "imm8" from "a" and "b", and store the results in "dst".</description>
+ <operation>
+DEFINE SELECT4(src1, src2, control) {
+ CASE(control[1:0]) OF
+ 0: tmp[127:0] := src1[127:0]
+ 1: tmp[127:0] := src1[255:128]
+ 2: tmp[127:0] := src2[127:0]
+ 3: tmp[127:0] := src2[255:128]
+ ESAC
+ IF control[3]
+ tmp[127:0] := 0
+ FI
+ RETURN tmp[127:0]
+}
+dst[127:0] := SELECT4(a[255:0], b[255:0], imm8[3:0])
+dst[255:128] := SELECT4(a[255:0], b[255:0], imm8[7:4])
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPERM2I128" form="ymm, ymm, ymm, imm8" xed="VPERM2I128_YMMqq_YMMqq_YMMqq_IMMb"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_permute4x64_epi64">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Swizzle</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shuffle 64-bit integers in "a" across lanes using the control in "imm8", and store the results in "dst".</description>
+ <operation>
+DEFINE SELECT4(src, control) {
+ CASE(control[1:0]) OF
+ 0: tmp[63:0] := src[63:0]
+ 1: tmp[63:0] := src[127:64]
+ 2: tmp[63:0] := src[191:128]
+ 3: tmp[63:0] := src[255:192]
+ ESAC
+ RETURN tmp[63:0]
+}
+dst[63:0] := SELECT4(a[255:0], imm8[1:0])
+dst[127:64] := SELECT4(a[255:0], imm8[3:2])
+dst[191:128] := SELECT4(a[255:0], imm8[5:4])
+dst[255:192] := SELECT4(a[255:0], imm8[7:6])
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPERMQ" form="ymm, ymm, imm8" xed="VPERMQ_YMMqq_YMMqq_IMMb"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_permute4x64_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX2</CPUID>
+ <category>Swizzle</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shuffle double-precision (64-bit) floating-point elements in "a" across lanes using the control in "imm8", and store the results in "dst".</description>
+ <operation>
+DEFINE SELECT4(src, control) {
+ CASE(control[1:0]) OF
+ 0: tmp[63:0] := src[63:0]
+ 1: tmp[63:0] := src[127:64]
+ 2: tmp[63:0] := src[191:128]
+ 3: tmp[63:0] := src[255:192]
+ ESAC
+ RETURN tmp[63:0]
+}
+dst[63:0] := SELECT4(a[255:0], imm8[1:0])
+dst[127:64] := SELECT4(a[255:0], imm8[3:2])
+dst[191:128] := SELECT4(a[255:0], imm8[5:4])
+dst[255:192] := SELECT4(a[255:0], imm8[7:6])
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPERMPD" form="ymm, ymm, imm8" xed="VPERMPD_YMMqq_YMMqq_IMMb"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_permutevar8x32_epi32">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Swizzle</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="__m256i" varname="idx" etype="UI32"/>
+ <description>Shuffle 32-bit integers in "a" across lanes using the corresponding index in "idx", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ id := idx[i+2:i]*32
+ dst[i+31:i] := a[id+31:id]
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPERMD" form="ymm, ymm, ymm" xed="VPERMD_YMMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_permutevar8x32_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX2</CPUID>
+ <category>Swizzle</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="__m256i" varname="idx" etype="UI32"/>
+ <description>Shuffle single-precision (32-bit) floating-point elements in "a" across lanes using the corresponding index in "idx".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ id := idx[i+2:i]*32
+ dst[i+31:i] := a[id+31:id]
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPERMPS" form="ymm, ymm, ymm" xed="VPERMPS_YMMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_sad_epu8">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__m256i" varname="a" etype="UI8"/>
+ <parameter type="__m256i" varname="b" etype="UI8"/>
+ <description>Compute the absolute differences of packed unsigned 8-bit integers in "a" and "b", then horizontally sum each consecutive 8 differences to produce four unsigned 16-bit integers, and pack these unsigned 16-bit integers in the low 16 bits of 64-bit elements in "dst".</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*8
+ tmp[i+7:i] := ABS(a[i+7:i] - b[i+7:i])
+ENDFOR
+FOR j := 0 to 3
+ i := j*64
+ dst[i+15:i] := tmp[i+7:i] + tmp[i+15:i+8] + tmp[i+23:i+16] + tmp[i+31:i+24] + \
+ tmp[i+39:i+32] + tmp[i+47:i+40] + tmp[i+55:i+48] + tmp[i+63:i+56]
+ dst[i+63:i+16] := 0
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSADBW" form="ymm, ymm, ymm" xed="VPSADBW_YMMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_shuffle_epi32">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Swizzle</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shuffle 32-bit integers in "a" within 128-bit lanes using the control in "imm8", and store the results in "dst".</description>
+ <operation>
+DEFINE SELECT4(src, control) {
+ CASE(control[1:0]) OF
+ 0: tmp[31:0] := src[31:0]
+ 1: tmp[31:0] := src[63:32]
+ 2: tmp[31:0] := src[95:64]
+ 3: tmp[31:0] := src[127:96]
+ ESAC
+ RETURN tmp[31:0]
+}
+dst[31:0] := SELECT4(a[127:0], imm8[1:0])
+dst[63:32] := SELECT4(a[127:0], imm8[3:2])
+dst[95:64] := SELECT4(a[127:0], imm8[5:4])
+dst[127:96] := SELECT4(a[127:0], imm8[7:6])
+dst[159:128] := SELECT4(a[255:128], imm8[1:0])
+dst[191:160] := SELECT4(a[255:128], imm8[3:2])
+dst[223:192] := SELECT4(a[255:128], imm8[5:4])
+dst[255:224] := SELECT4(a[255:128], imm8[7:6])
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSHUFD" form="ymm, ymm, imm8" xed="VPSHUFD_YMMqq_YMMqq_IMMb"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_shuffle_epi8">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Swizzle</category>
+ <return type="__m256i" varname="dst" etype="UI8"/>
+ <parameter type="__m256i" varname="a" etype="UI8"/>
+ <parameter type="__m256i" varname="b" etype="UI8"/>
+ <description>Shuffle 8-bit integers in "a" within 128-bit lanes according to shuffle control mask in the corresponding 8-bit element of "b", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*8
+ IF b[i+7] == 1
+ dst[i+7:i] := 0
+ ELSE
+ index[3:0] := b[i+3:i]
+ dst[i+7:i] := a[index*8+7:index*8]
+ FI
+ IF b[128+i+7] == 1
+ dst[128+i+7:128+i] := 0
+ ELSE
+ index[3:0] := b[128+i+3:128+i]
+ dst[128+i+7:128+i] := a[128+index*8+7:128+index*8]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSHUFB" form="ymm, ymm, ymm" xed="VPSHUFB_YMMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_shufflehi_epi16">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Swizzle</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__m256i" varname="a" etype="UI16"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shuffle 16-bit integers in the high 64 bits of 128-bit lanes of "a" using the control in "imm8". Store the results in the high 64 bits of 128-bit lanes of "dst", with the low 64 bits of 128-bit lanes being copied from "a" to "dst".</description>
+ <operation>
+dst[63:0] := a[63:0]
+dst[79:64] := (a &gt;&gt; (imm8[1:0] * 16))[79:64]
+dst[95:80] := (a &gt;&gt; (imm8[3:2] * 16))[79:64]
+dst[111:96] := (a &gt;&gt; (imm8[5:4] * 16))[79:64]
+dst[127:112] := (a &gt;&gt; (imm8[7:6] * 16))[79:64]
+dst[191:128] := a[191:128]
+dst[207:192] := (a &gt;&gt; (imm8[1:0] * 16))[207:192]
+dst[223:208] := (a &gt;&gt; (imm8[3:2] * 16))[207:192]
+dst[239:224] := (a &gt;&gt; (imm8[5:4] * 16))[207:192]
+dst[255:240] := (a &gt;&gt; (imm8[7:6] * 16))[207:192]
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSHUFHW" form="ymm, ymm, imm8" xed="VPSHUFHW_YMMqq_YMMqq_IMMb"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_shufflelo_epi16">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Swizzle</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__m256i" varname="a" etype="UI16"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shuffle 16-bit integers in the low 64 bits of 128-bit lanes of "a" using the control in "imm8". Store the results in the low 64 bits of 128-bit lanes of "dst", with the high 64 bits of 128-bit lanes being copied from "a" to "dst".</description>
+ <operation>
+dst[15:0] := (a &gt;&gt; (imm8[1:0] * 16))[15:0]
+dst[31:16] := (a &gt;&gt; (imm8[3:2] * 16))[15:0]
+dst[47:32] := (a &gt;&gt; (imm8[5:4] * 16))[15:0]
+dst[63:48] := (a &gt;&gt; (imm8[7:6] * 16))[15:0]
+dst[127:64] := a[127:64]
+dst[143:128] := (a &gt;&gt; (imm8[1:0] * 16))[143:128]
+dst[159:144] := (a &gt;&gt; (imm8[3:2] * 16))[143:128]
+dst[175:160] := (a &gt;&gt; (imm8[5:4] * 16))[143:128]
+dst[191:176] := (a &gt;&gt; (imm8[7:6] * 16))[143:128]
+dst[255:192] := a[255:192]
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSHUFLW" form="ymm, ymm, imm8" xed="VPSHUFLW_YMMqq_YMMqq_IMMb"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_sign_epi8">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI8"/>
+ <parameter type="__m256i" varname="a" etype="SI8"/>
+ <parameter type="__m256i" varname="b" etype="SI8"/>
+ <description>Negate packed signed 8-bit integers in "a" when the corresponding signed 8-bit integer in "b" is negative, and store the results in "dst". Elements in "dst" are zeroed out when the corresponding element in "b" is zero.</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*8
+ IF b[i+7:i] &lt; 0
+ dst[i+7:i] := -(a[i+7:i])
+ ELSE IF b[i+7:i] == 0
+ dst[i+7:i] := 0
+ ELSE
+ dst[i+7:i] := a[i+7:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSIGNB" form="ymm, ymm, ymm" xed="VPSIGNB_YMMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_sign_epi16">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__m256i" varname="a" etype="SI16"/>
+ <parameter type="__m256i" varname="b" etype="SI16"/>
+ <description>Negate packed signed 16-bit integers in "a" when the corresponding signed 16-bit integer in "b" is negative, and store the results in "dst". Elements in "dst" are zeroed out when the corresponding element in "b" is zero.</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ IF b[i+15:i] &lt; 0
+ dst[i+15:i] := -(a[i+15:i])
+ ELSE IF b[i+15:i] == 0
+ dst[i+15:i] := 0
+ ELSE
+ dst[i+15:i] := a[i+15:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSIGNW" form="ymm, ymm, ymm" xed="VPSIGNW_YMMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_sign_epi32">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="a" etype="SI32"/>
+ <parameter type="__m256i" varname="b" etype="SI32"/>
+ <description>Negate packed signed 32-bit integers in "a" when the corresponding signed 32-bit integer in "b" is negative, and store the results in "dst". Elements in "dst" are zeroed out when the corresponding element in "b" is zero.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF b[i+31:i] &lt; 0
+ dst[i+31:i] := -(a[i+31:i])
+ ELSE IF b[i+31:i] == 0
+ dst[i+31:i] := 0
+ ELSE
+ dst[i+31:i] := a[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSIGND" form="ymm, ymm, ymm" xed="VPSIGND_YMMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_slli_si256">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="M128"/>
+ <parameter type="__m256i" varname="a" etype="M128"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shift 128-bit lanes in "a" left by "imm8" bytes while shifting in zeros, and store the results in "dst".</description>
+ <operation>
+tmp := imm8[7:0]
+IF tmp &gt; 15
+ tmp := 16
+FI
+dst[127:0] := a[127:0] &lt;&lt; (tmp*8)
+dst[255:128] := a[255:128] &lt;&lt; (tmp*8)
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSLLDQ" form="ymm, ymm, imm8" xed="VPSLLDQ_YMMqq_YMMqq_IMMb"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_bslli_epi128">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="M128"/>
+ <parameter type="__m256i" varname="a" etype="M128"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shift 128-bit lanes in "a" left by "imm8" bytes while shifting in zeros, and store the results in "dst".</description>
+ <operation>
+tmp := imm8[7:0]
+IF tmp &gt; 15
+ tmp := 16
+FI
+dst[127:0] := a[127:0] &lt;&lt; (tmp*8)
+dst[255:128] := a[255:128] &lt;&lt; (tmp*8)
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSLLDQ" form="ymm, ymm, imm8" xed="VPSLLDQ_YMMqq_YMMqq_IMMb"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_sll_epi16">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__m256i" varname="a" etype="UI16"/>
+ <parameter type="__m128i" varname="count" etype="UI16"/>
+ <description>Shift packed 16-bit integers in "a" left by "count" while shifting in zeros, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ IF count[63:0] &gt; 15
+ dst[i+15:i] := 0
+ ELSE
+ dst[i+15:i] := ZeroExtend16(a[i+15:i] &lt;&lt; count[63:0])
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSLLW" form="ymm, ymm, xmm" xed="VPSLLW_YMMqq_YMMqq_XMMq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_slli_epi16">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__m256i" varname="a" etype="UI16"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shift packed 16-bit integers in "a" left by "imm8" while shifting in zeros, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ IF imm8[7:0] &gt; 15
+ dst[i+15:i] := 0
+ ELSE
+ dst[i+15:i] := ZeroExtend16(a[i+15:i] &lt;&lt; imm8[7:0])
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSLLW" form="ymm, ymm, imm8" xed="VPSLLW_YMMqq_YMMqq_IMMb"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_sll_epi32">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="count" etype="UI32"/>
+ <description>Shift packed 32-bit integers in "a" left by "count" while shifting in zeros, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF count[63:0] &gt; 31
+ dst[i+31:i] := 0
+ ELSE
+ dst[i+31:i] := ZeroExtend32(a[i+31:i] &lt;&lt; count[63:0])
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSLLD" form="ymm, ymm, xmm" xed="VPSLLD_YMMqq_YMMqq_XMMq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_slli_epi32">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shift packed 32-bit integers in "a" left by "imm8" while shifting in zeros, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF imm8[7:0] &gt; 31
+ dst[i+31:i] := 0
+ ELSE
+ dst[i+31:i] := ZeroExtend32(a[i+31:i] &lt;&lt; imm8[7:0])
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSLLD" form="ymm, ymm, imm8" xed="VPSLLD_YMMqq_YMMqq_IMMb"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_sll_epi64">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="__m128i" varname="count" etype="UI64"/>
+ <description>Shift packed 64-bit integers in "a" left by "count" while shifting in zeros, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF count[63:0] &gt; 63
+ dst[i+63:i] := 0
+ ELSE
+ dst[i+63:i] := ZeroExtend64(a[i+63:i] &lt;&lt; count[63:0])
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSLLQ" form="ymm, ymm, xmm" xed="VPSLLQ_YMMqq_YMMqq_XMMq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_slli_epi64">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shift packed 64-bit integers in "a" left by "imm8" while shifting in zeros, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF imm8[7:0] &gt; 63
+ dst[i+63:i] := 0
+ ELSE
+ dst[i+63:i] := ZeroExtend64(a[i+63:i] &lt;&lt; imm8[7:0])
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSLLQ" form="ymm, ymm, imm8" xed="VPSLLQ_YMMqq_YMMqq_IMMb"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm_sllv_epi32">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="count" etype="UI32"/>
+ <description>Shift packed 32-bit integers in "a" left by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF count[i+31:i] &lt; 32
+ dst[i+31:i] := ZeroExtend32(a[i+31:i] &lt;&lt; count[i+31:i])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPSLLVD" form="xmm, xmm, xmm" xed="VPSLLVD_XMMdq_XMMdq_XMMdq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_sllv_epi32">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="__m256i" varname="count" etype="UI32"/>
+ <description>Shift packed 32-bit integers in "a" left by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF count[i+31:i] &lt; 32
+ dst[i+31:i] := ZeroExtend32(a[i+31:i] &lt;&lt; count[i+31:i])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSLLVD" form="ymm, ymm, ymm" xed="VPSLLVD_YMMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm_sllv_epi64">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <parameter type="__m128i" varname="count" etype="UI64"/>
+ <description>Shift packed 64-bit integers in "a" left by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF count[i+63:i] &lt; 64
+ dst[i+63:i] := ZeroExtend64(a[i+63:i] &lt;&lt; count[i+63:i])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPSLLVQ" form="xmm, xmm, xmm" xed="VPSLLVQ_XMMdq_XMMdq_XMMdq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_sllv_epi64">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="__m256i" varname="count" etype="UI64"/>
+ <description>Shift packed 64-bit integers in "a" left by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF count[i+63:i] &lt; 64
+ dst[i+63:i] := ZeroExtend64(a[i+63:i] &lt;&lt; count[i+63:i])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSLLVQ" form="ymm, ymm, ymm" xed="VPSLLVQ_YMMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_sra_epi16">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__m256i" varname="a" etype="UI16"/>
+ <parameter type="__m128i" varname="count" etype="UI16"/>
+ <description>Shift packed 16-bit integers in "a" right by "count" while shifting in sign bits, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ IF count[63:0] &gt; 15
+ dst[i+15:i] := (a[i+15] ? 0xFFFF : 0x0)
+ ELSE
+ dst[i+15:i] := SignExtend16(a[i+15:i] &gt;&gt; count[63:0])
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSRAW" form="ymm, ymm, xmm" xed="VPSRAW_YMMqq_YMMqq_XMMq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_srai_epi16">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__m256i" varname="a" etype="SI16"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shift packed 16-bit integers in "a" right by "imm8" while shifting in sign bits, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ IF imm8[7:0] &gt; 15
+ dst[i+15:i] := (a[i+15] ? 0xFFFF : 0x0)
+ ELSE
+ dst[i+15:i] := SignExtend16(a[i+15:i] &gt;&gt; imm8[7:0])
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSRAW" form="ymm, ymm, imm8" xed="VPSRAW_YMMqq_YMMqq_IMMb"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_sra_epi32">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="count" etype="UI32"/>
+ <description>Shift packed 32-bit integers in "a" right by "count" while shifting in sign bits, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF count[63:0] &gt; 31
+ dst[i+31:i] := (a[i+31] ? 0xFFFFFFFF : 0x0)
+ ELSE
+ dst[i+31:i] := SignExtend32(a[i+31:i] &gt;&gt; count[63:0])
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSRAD" form="ymm, ymm, xmm" xed="VPSRAD_YMMqq_YMMqq_XMMq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_srai_epi32">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="a" etype="SI32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shift packed 32-bit integers in "a" right by "imm8" while shifting in sign bits, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF imm8[7:0] &gt; 31
+ dst[i+31:i] := (a[i+31] ? 0xFFFFFFFF : 0x0)
+ ELSE
+ dst[i+31:i] := SignExtend32(a[i+31:i] &gt;&gt; imm8[7:0])
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSRAD" form="ymm, ymm, imm8" xed="VPSRAD_YMMqq_YMMqq_IMMb"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm_srav_epi32">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="a" etype="SI32"/>
+ <parameter type="__m128i" varname="count" etype="UI32"/>
+ <description>Shift packed 32-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in sign bits, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF count[i+31:i] &lt; 32
+ dst[i+31:i] := SignExtend32(a[i+31:i] &gt;&gt; count[i+31:i])
+ ELSE
+ dst[i+31:i] := (a[i+31] ? 0xFFFFFFFF : 0)
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPSRAVD" form="xmm, xmm, xmm" xed="VPSRAVD_XMMdq_XMMdq_XMMdq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_srav_epi32">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="a" etype="SI32"/>
+ <parameter type="__m256i" varname="count" etype="UI32"/>
+ <description>Shift packed 32-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in sign bits, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF count[i+31:i] &lt; 32
+ dst[i+31:i] := SignExtend32(a[i+31:i] &gt;&gt; count[i+31:i])
+ ELSE
+ dst[i+31:i] := (a[i+31] ? 0xFFFFFFFF : 0)
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSRAVD" form="ymm, ymm, ymm" xed="VPSRAVD_YMMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_srli_si256">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="M128"/>
+ <parameter type="__m256i" varname="a" etype="M128"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shift 128-bit lanes in "a" right by "imm8" bytes while shifting in zeros, and store the results in "dst".</description>
+ <operation>
+tmp := imm8[7:0]
+IF tmp &gt; 15
+ tmp := 16
+FI
+dst[127:0] := a[127:0] &gt;&gt; (tmp*8)
+dst[255:128] := a[255:128] &gt;&gt; (tmp*8)
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSRLDQ" form="ymm, ymm, imm8" xed="VPSRLDQ_YMMqq_YMMqq_IMMb"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_bsrli_epi128">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="M128"/>
+ <parameter type="__m256i" varname="a" etype="M128"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shift 128-bit lanes in "a" right by "imm8" bytes while shifting in zeros, and store the results in "dst".</description>
+ <operation>
+tmp := imm8[7:0]
+IF tmp &gt; 15
+ tmp := 16
+FI
+dst[127:0] := a[127:0] &gt;&gt; (tmp*8)
+dst[255:128] := a[255:128] &gt;&gt; (tmp*8)
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSRLDQ" form="ymm, ymm, imm8" xed="VPSRLDQ_YMMqq_YMMqq_IMMb"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_srl_epi16">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__m256i" varname="a" etype="UI16"/>
+ <parameter type="__m128i" varname="count" etype="UI16"/>
+ <description>Shift packed 16-bit integers in "a" right by "count" while shifting in zeros, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ IF count[63:0] &gt; 15
+ dst[i+15:i] := 0
+ ELSE
+ dst[i+15:i] := ZeroExtend16(a[i+15:i] &gt;&gt; count[63:0])
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSRLW" form="ymm, ymm, xmm" xed="VPSRLW_YMMqq_YMMqq_XMMq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_srli_epi16">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__m256i" varname="a" etype="UI16"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shift packed 16-bit integers in "a" right by "imm8" while shifting in zeros, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ IF imm8[7:0] &gt; 15
+ dst[i+15:i] := 0
+ ELSE
+ dst[i+15:i] := ZeroExtend16(a[i+15:i] &gt;&gt; imm8[7:0])
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSRLW" form="ymm, ymm, imm8" xed="VPSRLW_YMMqq_YMMqq_IMMb"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_srl_epi32">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="count" etype="UI32"/>
+ <description>Shift packed 32-bit integers in "a" right by "count" while shifting in zeros, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF count[63:0] &gt; 31
+ dst[i+31:i] := 0
+ ELSE
+ dst[i+31:i] := ZeroExtend32(a[i+31:i] &gt;&gt; count[63:0])
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSRLD" form="ymm, ymm, xmm" xed="VPSRLD_YMMqq_YMMqq_XMMq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_srli_epi32">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shift packed 32-bit integers in "a" right by "imm8" while shifting in zeros, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF imm8[7:0] &gt; 31
+ dst[i+31:i] := 0
+ ELSE
+ dst[i+31:i] := ZeroExtend32(a[i+31:i] &gt;&gt; imm8[7:0])
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSRLD" form="ymm, ymm, imm8" xed="VPSRLD_YMMqq_YMMqq_IMMb"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_srl_epi64">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="__m128i" varname="count" etype="UI64"/>
+ <description>Shift packed 64-bit integers in "a" right by "count" while shifting in zeros, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF count[63:0] &gt; 63
+ dst[i+63:i] := 0
+ ELSE
+ dst[i+63:i] := ZeroExtend64(a[i+63:i] &gt;&gt; count[63:0])
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSRLQ" form="ymm, ymm, xmm" xed="VPSRLQ_YMMqq_YMMqq_XMMq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_srli_epi64">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shift packed 64-bit integers in "a" right by "imm8" while shifting in zeros, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF imm8[7:0] &gt; 63
+ dst[i+63:i] := 0
+ ELSE
+ dst[i+63:i] := ZeroExtend64(a[i+63:i] &gt;&gt; imm8[7:0])
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSRLQ" form="ymm, ymm, imm8" xed="VPSRLQ_YMMqq_YMMqq_IMMb"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm_srlv_epi32">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="count" etype="UI32"/>
+ <description>Shift packed 32-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF count[i+31:i] &lt; 32
+ dst[i+31:i] := ZeroExtend32(a[i+31:i] &gt;&gt; count[i+31:i])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPSRLVD" form="xmm, xmm, xmm" xed="VPSRLVD_XMMdq_XMMdq_XMMdq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_srlv_epi32">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="__m256i" varname="count" etype="UI32"/>
+ <description>Shift packed 32-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF count[i+31:i] &lt; 32
+ dst[i+31:i] := ZeroExtend32(a[i+31:i] &gt;&gt; count[i+31:i])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSRLVD" form="ymm, ymm, ymm" xed="VPSRLVD_YMMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm_srlv_epi64">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <parameter type="__m128i" varname="count" etype="UI64"/>
+ <description>Shift packed 64-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF count[i+63:i] &lt; 64
+ dst[i+63:i] := ZeroExtend64(a[i+63:i] &gt;&gt; count[i+63:i])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPSRLVQ" form="xmm, xmm, xmm" xed="VPSRLVQ_XMMdq_XMMdq_XMMdq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_srlv_epi64">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="__m256i" varname="count" etype="UI64"/>
+ <description>Shift packed 64-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF count[i+63:i] &lt; 64
+ dst[i+63:i] := ZeroExtend64(a[i+63:i] &gt;&gt; count[i+63:i])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSRLVQ" form="ymm, ymm, ymm" xed="VPSRLVQ_YMMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_stream_load_si256">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Load</category>
+ <return type="__m256i" varname="dst" etype="M256"/>
+ <parameter type="__m256i const*" varname="mem_addr" etype="M256" memwidth="256"/>
+ <description>Load 256-bits of integer data from memory into "dst" using a non-temporal memory hint.
+ "mem_addr" must be aligned on a 32-byte boundary or a general-protection exception may be generated.</description>
+ <operation>
+dst[255:0] := MEM[mem_addr+255:mem_addr]
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VMOVNTDQA" form="ymm, m256" xed="VMOVNTDQA_YMMqq_MEMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_sub_epi8">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI8"/>
+ <parameter type="__m256i" varname="a" etype="UI8"/>
+ <parameter type="__m256i" varname="b" etype="UI8"/>
+ <description>Subtract packed 8-bit integers in "b" from packed 8-bit integers in "a", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*8
+ dst[i+7:i] := a[i+7:i] - b[i+7:i]
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSUBB" form="ymm, ymm, ymm" xed="VPSUBB_YMMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_sub_epi16">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__m256i" varname="a" etype="UI16"/>
+ <parameter type="__m256i" varname="b" etype="UI16"/>
+ <description>Subtract packed 16-bit integers in "b" from packed 16-bit integers in "a", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ dst[i+15:i] := a[i+15:i] - b[i+15:i]
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSUBW" form="ymm, ymm, ymm" xed="VPSUBW_YMMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_sub_epi32">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="__m256i" varname="b" etype="UI32"/>
+ <description>Subtract packed 32-bit integers in "b" from packed 32-bit integers in "a", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ dst[i+31:i] := a[i+31:i] - b[i+31:i]
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSUBD" form="ymm, ymm, ymm" xed="VPSUBD_YMMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_sub_epi64">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="__m256i" varname="b" etype="UI64"/>
+ <description>Subtract packed 64-bit integers in "b" from packed 64-bit integers in "a", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ dst[i+63:i] := a[i+63:i] - b[i+63:i]
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSUBQ" form="ymm, ymm, ymm" xed="VPSUBQ_YMMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_subs_epi8">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI8"/>
+ <parameter type="__m256i" varname="a" etype="SI8"/>
+ <parameter type="__m256i" varname="b" etype="SI8"/>
+ <description>Subtract packed signed 8-bit integers in "b" from packed 8-bit integers in "a" using saturation, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*8
+ dst[i+7:i] := Saturate8(a[i+7:i] - b[i+7:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSUBSB" form="ymm, ymm, ymm" xed="VPSUBSB_YMMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_subs_epi16">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__m256i" varname="a" etype="SI16"/>
+ <parameter type="__m256i" varname="b" etype="SI16"/>
+ <description>Subtract packed signed 16-bit integers in "b" from packed 16-bit integers in "a" using saturation, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ dst[i+15:i] := Saturate16(a[i+15:i] - b[i+15:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSUBSW" form="ymm, ymm, ymm" xed="VPSUBSW_YMMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_subs_epu8">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI8"/>
+ <parameter type="__m256i" varname="a" etype="UI8"/>
+ <parameter type="__m256i" varname="b" etype="UI8"/>
+ <description>Subtract packed unsigned 8-bit integers in "b" from packed unsigned 8-bit integers in "a" using saturation, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*8
+ dst[i+7:i] := SaturateU8(a[i+7:i] - b[i+7:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSUBUSB" form="ymm, ymm, ymm" xed="VPSUBUSB_YMMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_subs_epu16">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__m256i" varname="a" etype="UI16"/>
+ <parameter type="__m256i" varname="b" etype="UI16"/>
+ <description>Subtract packed unsigned 16-bit integers in "b" from packed unsigned 16-bit integers in "a" using saturation, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ dst[i+15:i] := SaturateU16(a[i+15:i] - b[i+15:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSUBUSW" form="ymm, ymm, ymm" xed="VPSUBUSW_YMMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_xor_si256">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Logical</category>
+ <return type="__m256i" varname="dst" etype="M256"/>
+ <parameter type="__m256i" varname="a" etype="M256"/>
+ <parameter type="__m256i" varname="b" etype="M256"/>
+ <description>Compute the bitwise XOR of 256 bits (representing integer data) in "a" and "b", and store the result in "dst".</description>
+ <operation>
+dst[255:0] := (a[255:0] XOR b[255:0])
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPXOR" form="ymm, ymm, ymm" xed="VPXOR_YMMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_unpackhi_epi8">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Swizzle</category>
+ <return type="__m256i" varname="dst" etype="UI8"/>
+ <parameter type="__m256i" varname="a" etype="UI8"/>
+ <parameter type="__m256i" varname="b" etype="UI8"/>
+ <description>Unpack and interleave 8-bit integers from the high half of each 128-bit lane in "a" and "b", and store the results in "dst".</description>
+ <operation>
+DEFINE INTERLEAVE_HIGH_BYTES(src1[127:0], src2[127:0]) {
+ dst[7:0] := src1[71:64]
+ dst[15:8] := src2[71:64]
+ dst[23:16] := src1[79:72]
+ dst[31:24] := src2[79:72]
+ dst[39:32] := src1[87:80]
+ dst[47:40] := src2[87:80]
+ dst[55:48] := src1[95:88]
+ dst[63:56] := src2[95:88]
+ dst[71:64] := src1[103:96]
+ dst[79:72] := src2[103:96]
+ dst[87:80] := src1[111:104]
+ dst[95:88] := src2[111:104]
+ dst[103:96] := src1[119:112]
+ dst[111:104] := src2[119:112]
+ dst[119:112] := src1[127:120]
+ dst[127:120] := src2[127:120]
+ RETURN dst[127:0]
+}
+dst[127:0] := INTERLEAVE_HIGH_BYTES(a[127:0], b[127:0])
+dst[255:128] := INTERLEAVE_HIGH_BYTES(a[255:128], b[255:128])
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPUNPCKHBW" form="ymm, ymm, ymm" xed="VPUNPCKHBW_YMMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_unpackhi_epi16">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Swizzle</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__m256i" varname="a" etype="UI16"/>
+ <parameter type="__m256i" varname="b" etype="UI16"/>
+ <description>Unpack and interleave 16-bit integers from the high half of each 128-bit lane in "a" and "b", and store the results in "dst".</description>
+ <operation>
+DEFINE INTERLEAVE_HIGH_WORDS(src1[127:0], src2[127:0]) {
+ dst[15:0] := src1[79:64]
+ dst[31:16] := src2[79:64]
+ dst[47:32] := src1[95:80]
+ dst[63:48] := src2[95:80]
+ dst[79:64] := src1[111:96]
+ dst[95:80] := src2[111:96]
+ dst[111:96] := src1[127:112]
+ dst[127:112] := src2[127:112]
+ RETURN dst[127:0]
+}
+dst[127:0] := INTERLEAVE_HIGH_WORDS(a[127:0], b[127:0])
+dst[255:128] := INTERLEAVE_HIGH_WORDS(a[255:128], b[255:128])
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPUNPCKHWD" form="ymm, ymm, ymm" xed="VPUNPCKHWD_YMMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_unpackhi_epi32">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Swizzle</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="__m256i" varname="b" etype="UI32"/>
+ <description>Unpack and interleave 32-bit integers from the high half of each 128-bit lane in "a" and "b", and store the results in "dst".</description>
+ <operation>
+DEFINE INTERLEAVE_HIGH_DWORDS(src1[127:0], src2[127:0]) {
+ dst[31:0] := src1[95:64]
+ dst[63:32] := src2[95:64]
+ dst[95:64] := src1[127:96]
+ dst[127:96] := src2[127:96]
+ RETURN dst[127:0]
+}
+dst[127:0] := INTERLEAVE_HIGH_DWORDS(a[127:0], b[127:0])
+dst[255:128] := INTERLEAVE_HIGH_DWORDS(a[255:128], b[255:128])
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPUNPCKHDQ" form="ymm, ymm, ymm" xed="VPUNPCKHDQ_YMMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_unpackhi_epi64">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Swizzle</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="__m256i" varname="b" etype="UI64"/>
+ <description>Unpack and interleave 64-bit integers from the high half of each 128-bit lane in "a" and "b", and store the results in "dst".</description>
+ <operation>
+DEFINE INTERLEAVE_HIGH_QWORDS(src1[127:0], src2[127:0]) {
+ dst[63:0] := src1[127:64]
+ dst[127:64] := src2[127:64]
+ RETURN dst[127:0]
+}
+dst[127:0] := INTERLEAVE_HIGH_QWORDS(a[127:0], b[127:0])
+dst[255:128] := INTERLEAVE_HIGH_QWORDS(a[255:128], b[255:128])
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPUNPCKHQDQ" form="ymm, ymm, ymm" xed="VPUNPCKHQDQ_YMMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_unpacklo_epi8">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Swizzle</category>
+ <return type="__m256i" varname="dst" etype="UI8"/>
+ <parameter type="__m256i" varname="a" etype="UI8"/>
+ <parameter type="__m256i" varname="b" etype="UI8"/>
+ <description>Unpack and interleave 8-bit integers from the low half of each 128-bit lane in "a" and "b", and store the results in "dst".</description>
+ <operation>
+DEFINE INTERLEAVE_BYTES(src1[127:0], src2[127:0]) {
+ dst[7:0] := src1[7:0]
+ dst[15:8] := src2[7:0]
+ dst[23:16] := src1[15:8]
+ dst[31:24] := src2[15:8]
+ dst[39:32] := src1[23:16]
+ dst[47:40] := src2[23:16]
+ dst[55:48] := src1[31:24]
+ dst[63:56] := src2[31:24]
+ dst[71:64] := src1[39:32]
+ dst[79:72] := src2[39:32]
+ dst[87:80] := src1[47:40]
+ dst[95:88] := src2[47:40]
+ dst[103:96] := src1[55:48]
+ dst[111:104] := src2[55:48]
+ dst[119:112] := src1[63:56]
+ dst[127:120] := src2[63:56]
+ RETURN dst[127:0]
+}
+dst[127:0] := INTERLEAVE_BYTES(a[127:0], b[127:0])
+dst[255:128] := INTERLEAVE_BYTES(a[255:128], b[255:128])
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPUNPCKLBW" form="ymm, ymm, ymm" xed="VPUNPCKLBW_YMMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_unpacklo_epi16">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Swizzle</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__m256i" varname="a" etype="UI16"/>
+ <parameter type="__m256i" varname="b" etype="UI16"/>
+ <description>Unpack and interleave 16-bit integers from the low half of each 128-bit lane in "a" and "b", and store the results in "dst".</description>
+ <operation>
+DEFINE INTERLEAVE_WORDS(src1[127:0], src2[127:0]) {
+ dst[15:0] := src1[15:0]
+ dst[31:16] := src2[15:0]
+ dst[47:32] := src1[31:16]
+ dst[63:48] := src2[31:16]
+ dst[79:64] := src1[47:32]
+ dst[95:80] := src2[47:32]
+ dst[111:96] := src1[63:48]
+ dst[127:112] := src2[63:48]
+ RETURN dst[127:0]
+}
+dst[127:0] := INTERLEAVE_WORDS(a[127:0], b[127:0])
+dst[255:128] := INTERLEAVE_WORDS(a[255:128], b[255:128])
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPUNPCKLWD" form="ymm, ymm, ymm" xed="VPUNPCKLWD_YMMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_unpacklo_epi32">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Swizzle</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="__m256i" varname="b" etype="UI32"/>
+ <description>Unpack and interleave 32-bit integers from the low half of each 128-bit lane in "a" and "b", and store the results in "dst".</description>
+ <operation>
+DEFINE INTERLEAVE_DWORDS(src1[127:0], src2[127:0]) {
+ dst[31:0] := src1[31:0]
+ dst[63:32] := src2[31:0]
+ dst[95:64] := src1[63:32]
+ dst[127:96] := src2[63:32]
+ RETURN dst[127:0]
+}
+dst[127:0] := INTERLEAVE_DWORDS(a[127:0], b[127:0])
+dst[255:128] := INTERLEAVE_DWORDS(a[255:128], b[255:128])
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPUNPCKLDQ" form="ymm, ymm, ymm" xed="VPUNPCKLDQ_YMMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX2" name="_mm256_unpacklo_epi64">
+ <type>Integer</type>
+ <CPUID>AVX2</CPUID>
+ <category>Swizzle</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="__m256i" varname="b" etype="UI64"/>
+ <description>Unpack and interleave 64-bit integers from the low half of each 128-bit lane in "a" and "b", and store the results in "dst".</description>
+ <operation>
+DEFINE INTERLEAVE_QWORDS(src1[127:0], src2[127:0]) {
+ dst[63:0] := src1[63:0]
+ dst[127:64] := src2[63:0]
+ RETURN dst[127:0]
+}
+dst[127:0] := INTERLEAVE_QWORDS(a[127:0], b[127:0])
+dst[255:128] := INTERLEAVE_QWORDS(a[255:128], b[255:128])
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPUNPCKLQDQ" form="ymm, ymm, ymm" xed="VPUNPCKLQDQ_YMMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_kunpackd">
+ <type>Mask</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__mmask64" varname="dst" etype="MASK"/>
+ <parameter type="__mmask64" varname="a" etype="MASK"/>
+ <parameter type="__mmask64" varname="b" etype="MASK"/>
+ <description>Unpack and interleave 32 bits from masks "a" and "b", and store the 64-bit result in "dst".</description>
+ <operation>
+dst[31:0] := b[31:0]
+dst[63:32] := a[31:0]
+dst[MAX:64] := 0
+ </operation>
+ <instruction name="KUNPCKDQ" form="k, k, k" xed="KUNPCKDQ_MASKmskw_MASKmskw_MASKmskw_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_kunpackw">
+ <type>Mask</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__mmask32" varname="dst" etype="MASK"/>
+ <parameter type="__mmask32" varname="a" etype="MASK"/>
+ <parameter type="__mmask32" varname="b" etype="MASK"/>
+ <description>Unpack and interleave 16 bits from masks "a" and "b", and store the 32-bit result in "dst".</description>
+ <operation>
+dst[15:0] := b[15:0]
+dst[31:16] := a[15:0]
+dst[MAX:32] := 0
+ </operation>
+ <instruction name="KUNPCKWD" form="k, k, k" xed="KUNPCKWD_MASKmskw_MASKmskw_MASKmskw_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_dbsad_epu8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__m256i" varname="a" etype="UI8"/>
+ <parameter type="__m256i" varname="b" etype="UI8"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Compute the sum of absolute differences (SADs) of quadruplets of unsigned 8-bit integers in "a" compared to those in "b", and store the 16-bit results in "dst".
+ Four SADs are performed on four 8-bit quadruplets for each 64-bit lane. The first two SADs use the lower 8-bit quadruplet of the lane from "a", and the last two SADs use the uppper 8-bit quadruplet of the lane from "a". Quadruplets from "b" are selected from within 128-bit lanes according to the control in "imm8", and each SAD in each 64-bit lane uses the selected quadruplet at 8-bit offsets.</description>
+ <operation>
+FOR i := 0 to 1
+ tmp.m128[i].dword[0] := b.m128[i].dword[ imm8[1:0] ]
+ tmp.m128[i].dword[1] := b.m128[i].dword[ imm8[3:2] ]
+ tmp.m128[i].dword[2] := b.m128[i].dword[ imm8[5:4] ]
+ tmp.m128[i].dword[3] := b.m128[i].dword[ imm8[7:6] ]
+ENDFOR
+FOR j := 0 to 3
+ i := j*64
+ dst[i+15:i] := ABS(a[i+7:i] - tmp[i+7:i]) + ABS(a[i+15:i+8] - tmp[i+15:i+8]) +\
+ ABS(a[i+23:i+16] - tmp[i+23:i+16]) + ABS(a[i+31:i+24] - tmp[i+31:i+24])
+
+ dst[i+31:i+16] := ABS(a[i+7:i] - tmp[i+15:i+8]) + ABS(a[i+15:i+8] - tmp[i+23:i+16]) +\
+ ABS(a[i+23:i+16] - tmp[i+31:i+24]) + ABS(a[i+31:i+24] - tmp[i+39:i+32])
+
+ dst[i+47:i+32] := ABS(a[i+39:i+32] - tmp[i+23:i+16]) + ABS(a[i+47:i+40] - tmp[i+31:i+24]) +\
+ ABS(a[i+55:i+48] - tmp[i+39:i+32]) + ABS(a[i+63:i+56] - tmp[i+47:i+40])
+
+ dst[i+63:i+48] := ABS(a[i+39:i+32] - tmp[i+31:i+24]) + ABS(a[i+47:i+40] - tmp[i+39:i+32]) +\
+ ABS(a[i+55:i+48] - tmp[i+47:i+40]) + ABS(a[i+63:i+56] - tmp[i+55:i+48])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VDBPSADBW" form="ymm, ymm, ymm, imm8" xed="VDBPSADBW_YMMu16_MASKmskw_YMMu8_YMMu8_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_dbsad_epu8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__m256i" varname="src" etype="UI16"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI8"/>
+ <parameter type="__m256i" varname="b" etype="UI8"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Compute the sum of absolute differences (SADs) of quadruplets of unsigned 8-bit integers in "a" compared to those in "b", and store the 16-bit results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+	Four SADs are performed on four 8-bit quadruplets for each 64-bit lane. The first two SADs use the lower 8-bit quadruplet of the lane from "a", and the last two SADs use the upper 8-bit quadruplet of the lane from "a". Quadruplets from "b" are selected from within 128-bit lanes according to the control in "imm8", and each SAD in each 64-bit lane uses the selected quadruplet at 8-bit offsets.</description>
+ <operation>
+FOR i := 0 to 1
+ tmp.m128[i].dword[0] := b.m128[i].dword[ imm8[1:0] ]
+ tmp.m128[i].dword[1] := b.m128[i].dword[ imm8[3:2] ]
+ tmp.m128[i].dword[2] := b.m128[i].dword[ imm8[5:4] ]
+ tmp.m128[i].dword[3] := b.m128[i].dword[ imm8[7:6] ]
+ENDFOR
+FOR j := 0 to 3
+ i := j*64
+ tmp_dst[i+15:i] := ABS(a[i+7:i] - tmp[i+7:i]) + ABS(a[i+15:i+8] - tmp[i+15:i+8]) +\
+ ABS(a[i+23:i+16] - tmp[i+23:i+16]) + ABS(a[i+31:i+24] - tmp[i+31:i+24])
+
+ tmp_dst[i+31:i+16] := ABS(a[i+7:i] - tmp[i+15:i+8]) + ABS(a[i+15:i+8] - tmp[i+23:i+16]) +\
+ ABS(a[i+23:i+16] - tmp[i+31:i+24]) + ABS(a[i+31:i+24] - tmp[i+39:i+32])
+
+ tmp_dst[i+47:i+32] := ABS(a[i+39:i+32] - tmp[i+23:i+16]) + ABS(a[i+47:i+40] - tmp[i+31:i+24]) +\
+ ABS(a[i+55:i+48] - tmp[i+39:i+32]) + ABS(a[i+63:i+56] - tmp[i+47:i+40])
+
+ tmp_dst[i+63:i+48] := ABS(a[i+39:i+32] - tmp[i+31:i+24]) + ABS(a[i+47:i+40] - tmp[i+39:i+32]) +\
+ ABS(a[i+55:i+48] - tmp[i+47:i+40]) + ABS(a[i+63:i+56] - tmp[i+55:i+48])
+ENDFOR
+FOR j := 0 to 15
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := tmp_dst[i+15:i]
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VDBPSADBW" form="ymm {k}, ymm, ymm, imm8" xed="VDBPSADBW_YMMu16_MASKmskw_YMMu8_YMMu8_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_dbsad_epu8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI8"/>
+ <parameter type="__m256i" varname="b" etype="UI8"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Compute the sum of absolute differences (SADs) of quadruplets of unsigned 8-bit integers in "a" compared to those in "b", and store the 16-bit results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+	Four SADs are performed on four 8-bit quadruplets for each 64-bit lane. The first two SADs use the lower 8-bit quadruplet of the lane from "a", and the last two SADs use the upper 8-bit quadruplet of the lane from "a". Quadruplets from "b" are selected from within 128-bit lanes according to the control in "imm8", and each SAD in each 64-bit lane uses the selected quadruplet at 8-bit offsets.</description>
+ <operation>
+FOR i := 0 to 1
+ tmp.m128[i].dword[0] := b.m128[i].dword[ imm8[1:0] ]
+ tmp.m128[i].dword[1] := b.m128[i].dword[ imm8[3:2] ]
+ tmp.m128[i].dword[2] := b.m128[i].dword[ imm8[5:4] ]
+ tmp.m128[i].dword[3] := b.m128[i].dword[ imm8[7:6] ]
+ENDFOR
+FOR j := 0 to 3
+ i := j*64
+ tmp_dst[i+15:i] := ABS(a[i+7:i] - tmp[i+7:i]) + ABS(a[i+15:i+8] - tmp[i+15:i+8]) +\
+ ABS(a[i+23:i+16] - tmp[i+23:i+16]) + ABS(a[i+31:i+24] - tmp[i+31:i+24])
+
+ tmp_dst[i+31:i+16] := ABS(a[i+7:i] - tmp[i+15:i+8]) + ABS(a[i+15:i+8] - tmp[i+23:i+16]) +\
+ ABS(a[i+23:i+16] - tmp[i+31:i+24]) + ABS(a[i+31:i+24] - tmp[i+39:i+32])
+
+ tmp_dst[i+47:i+32] := ABS(a[i+39:i+32] - tmp[i+23:i+16]) + ABS(a[i+47:i+40] - tmp[i+31:i+24]) +\
+ ABS(a[i+55:i+48] - tmp[i+39:i+32]) + ABS(a[i+63:i+56] - tmp[i+47:i+40])
+
+ tmp_dst[i+63:i+48] := ABS(a[i+39:i+32] - tmp[i+31:i+24]) + ABS(a[i+47:i+40] - tmp[i+39:i+32]) +\
+ ABS(a[i+55:i+48] - tmp[i+47:i+40]) + ABS(a[i+63:i+56] - tmp[i+55:i+48])
+ENDFOR
+FOR j := 0 to 15
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := tmp_dst[i+15:i]
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VDBPSADBW" form="ymm {z}, ymm, ymm, imm8" xed="VDBPSADBW_YMMu16_MASKmskw_YMMu8_YMMu8_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_dbsad_epu8">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__m512i" varname="a" etype="UI8"/>
+ <parameter type="__m512i" varname="b" etype="UI8"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Compute the sum of absolute differences (SADs) of quadruplets of unsigned 8-bit integers in "a" compared to those in "b", and store the 16-bit results in "dst".
+	Four SADs are performed on four 8-bit quadruplets for each 64-bit lane. The first two SADs use the lower 8-bit quadruplet of the lane from "a", and the last two SADs use the upper 8-bit quadruplet of the lane from "a". Quadruplets from "b" are selected from within 128-bit lanes according to the control in "imm8", and each SAD in each 64-bit lane uses the selected quadruplet at 8-bit offsets.</description>
+ <operation>
+FOR i := 0 to 3
+ tmp.m128[i].dword[0] := b.m128[i].dword[ imm8[1:0] ]
+ tmp.m128[i].dword[1] := b.m128[i].dword[ imm8[3:2] ]
+ tmp.m128[i].dword[2] := b.m128[i].dword[ imm8[5:4] ]
+ tmp.m128[i].dword[3] := b.m128[i].dword[ imm8[7:6] ]
+ENDFOR
+FOR j := 0 to 7
+ i := j*64
+ dst[i+15:i] := ABS(a[i+7:i] - tmp[i+7:i]) + ABS(a[i+15:i+8] - tmp[i+15:i+8]) +\
+ ABS(a[i+23:i+16] - tmp[i+23:i+16]) + ABS(a[i+31:i+24] - tmp[i+31:i+24])
+
+ dst[i+31:i+16] := ABS(a[i+7:i] - tmp[i+15:i+8]) + ABS(a[i+15:i+8] - tmp[i+23:i+16]) +\
+ ABS(a[i+23:i+16] - tmp[i+31:i+24]) + ABS(a[i+31:i+24] - tmp[i+39:i+32])
+
+ dst[i+47:i+32] := ABS(a[i+39:i+32] - tmp[i+23:i+16]) + ABS(a[i+47:i+40] - tmp[i+31:i+24]) +\
+ ABS(a[i+55:i+48] - tmp[i+39:i+32]) + ABS(a[i+63:i+56] - tmp[i+47:i+40])
+
+ dst[i+63:i+48] := ABS(a[i+39:i+32] - tmp[i+31:i+24]) + ABS(a[i+47:i+40] - tmp[i+39:i+32]) +\
+ ABS(a[i+55:i+48] - tmp[i+47:i+40]) + ABS(a[i+63:i+56] - tmp[i+55:i+48])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VDBPSADBW" form="zmm, zmm, zmm, imm8" xed="VDBPSADBW_ZMMu16_MASKmskw_ZMMu8_ZMMu8_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_dbsad_epu8">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__m512i" varname="src" etype="UI16"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI8"/>
+ <parameter type="__m512i" varname="b" etype="UI8"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Compute the sum of absolute differences (SADs) of quadruplets of unsigned 8-bit integers in "a" compared to those in "b", and store the 16-bit results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+	Four SADs are performed on four 8-bit quadruplets for each 64-bit lane. The first two SADs use the lower 8-bit quadruplet of the lane from "a", and the last two SADs use the upper 8-bit quadruplet of the lane from "a". Quadruplets from "b" are selected from within 128-bit lanes according to the control in "imm8", and each SAD in each 64-bit lane uses the selected quadruplet at 8-bit offsets.</description>
+ <operation>
+FOR i := 0 to 3
+ tmp.m128[i].dword[0] := b.m128[i].dword[ imm8[1:0] ]
+ tmp.m128[i].dword[1] := b.m128[i].dword[ imm8[3:2] ]
+ tmp.m128[i].dword[2] := b.m128[i].dword[ imm8[5:4] ]
+ tmp.m128[i].dword[3] := b.m128[i].dword[ imm8[7:6] ]
+ENDFOR
+FOR j := 0 to 7
+ i := j*64
+ tmp_dst[i+15:i] := ABS(a[i+7:i] - tmp[i+7:i]) + ABS(a[i+15:i+8] - tmp[i+15:i+8]) +\
+ ABS(a[i+23:i+16] - tmp[i+23:i+16]) + ABS(a[i+31:i+24] - tmp[i+31:i+24])
+
+ tmp_dst[i+31:i+16] := ABS(a[i+7:i] - tmp[i+15:i+8]) + ABS(a[i+15:i+8] - tmp[i+23:i+16]) +\
+ ABS(a[i+23:i+16] - tmp[i+31:i+24]) + ABS(a[i+31:i+24] - tmp[i+39:i+32])
+
+ tmp_dst[i+47:i+32] := ABS(a[i+39:i+32] - tmp[i+23:i+16]) + ABS(a[i+47:i+40] - tmp[i+31:i+24]) +\
+ ABS(a[i+55:i+48] - tmp[i+39:i+32]) + ABS(a[i+63:i+56] - tmp[i+47:i+40])
+
+ tmp_dst[i+63:i+48] := ABS(a[i+39:i+32] - tmp[i+31:i+24]) + ABS(a[i+47:i+40] - tmp[i+39:i+32]) +\
+ ABS(a[i+55:i+48] - tmp[i+47:i+40]) + ABS(a[i+63:i+56] - tmp[i+55:i+48])
+ENDFOR
+FOR j := 0 to 31
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := tmp_dst[i+15:i]
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VDBPSADBW" form="zmm {k}, zmm, zmm, imm8" xed="VDBPSADBW_ZMMu16_MASKmskw_ZMMu8_ZMMu8_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_dbsad_epu8">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI8"/>
+ <parameter type="__m512i" varname="b" etype="UI8"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Compute the sum of absolute differences (SADs) of quadruplets of unsigned 8-bit integers in "a" compared to those in "b", and store the 16-bit results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+	Four SADs are performed on four 8-bit quadruplets for each 64-bit lane. The first two SADs use the lower 8-bit quadruplet of the lane from "a", and the last two SADs use the upper 8-bit quadruplet of the lane from "a". Quadruplets from "b" are selected from within 128-bit lanes according to the control in "imm8", and each SAD in each 64-bit lane uses the selected quadruplet at 8-bit offsets.</description>
+ <operation>
+FOR i := 0 to 3
+ tmp.m128[i].dword[0] := b.m128[i].dword[ imm8[1:0] ]
+ tmp.m128[i].dword[1] := b.m128[i].dword[ imm8[3:2] ]
+ tmp.m128[i].dword[2] := b.m128[i].dword[ imm8[5:4] ]
+ tmp.m128[i].dword[3] := b.m128[i].dword[ imm8[7:6] ]
+ENDFOR
+FOR j := 0 to 7
+ i := j*64
+ tmp_dst[i+15:i] := ABS(a[i+7:i] - tmp[i+7:i]) + ABS(a[i+15:i+8] - tmp[i+15:i+8]) +\
+ ABS(a[i+23:i+16] - tmp[i+23:i+16]) + ABS(a[i+31:i+24] - tmp[i+31:i+24])
+
+ tmp_dst[i+31:i+16] := ABS(a[i+7:i] - tmp[i+15:i+8]) + ABS(a[i+15:i+8] - tmp[i+23:i+16]) +\
+ ABS(a[i+23:i+16] - tmp[i+31:i+24]) + ABS(a[i+31:i+24] - tmp[i+39:i+32])
+
+ tmp_dst[i+47:i+32] := ABS(a[i+39:i+32] - tmp[i+23:i+16]) + ABS(a[i+47:i+40] - tmp[i+31:i+24]) +\
+ ABS(a[i+55:i+48] - tmp[i+39:i+32]) + ABS(a[i+63:i+56] - tmp[i+47:i+40])
+
+ tmp_dst[i+63:i+48] := ABS(a[i+39:i+32] - tmp[i+31:i+24]) + ABS(a[i+47:i+40] - tmp[i+39:i+32]) +\
+ ABS(a[i+55:i+48] - tmp[i+47:i+40]) + ABS(a[i+63:i+56] - tmp[i+55:i+48])
+ENDFOR
+FOR j := 0 to 31
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := tmp_dst[i+15:i]
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VDBPSADBW" form="zmm {z}, zmm, zmm, imm8" xed="VDBPSADBW_ZMMu16_MASKmskw_ZMMu8_ZMMu8_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_dbsad_epu8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
+ <parameter type="__m128i" varname="b" etype="UI8"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Compute the sum of absolute differences (SADs) of quadruplets of unsigned 8-bit integers in "a" compared to those in "b", and store the 16-bit results in "dst".
+	Four SADs are performed on four 8-bit quadruplets for each 64-bit lane. The first two SADs use the lower 8-bit quadruplet of the lane from "a", and the last two SADs use the upper 8-bit quadruplet of the lane from "a". Quadruplets from "b" are selected according to the control in "imm8", and each SAD in each 64-bit lane uses the selected quadruplet at 8-bit offsets.</description>
+ <operation>
+tmp.dword[0] := b.dword[ imm8[1:0] ]
+tmp.dword[1] := b.dword[ imm8[3:2] ]
+tmp.dword[2] := b.dword[ imm8[5:4] ]
+tmp.dword[3] := b.dword[ imm8[7:6] ]
+FOR j := 0 to 1
+ i := j*64
+ dst[i+15:i] := ABS(a[i+7:i] - tmp[i+7:i]) + ABS(a[i+15:i+8] - tmp[i+15:i+8]) +\
+ ABS(a[i+23:i+16] - tmp[i+23:i+16]) + ABS(a[i+31:i+24] - tmp[i+31:i+24])
+
+ dst[i+31:i+16] := ABS(a[i+7:i] - tmp[i+15:i+8]) + ABS(a[i+15:i+8] - tmp[i+23:i+16]) +\
+ ABS(a[i+23:i+16] - tmp[i+31:i+24]) + ABS(a[i+31:i+24] - tmp[i+39:i+32])
+
+ dst[i+47:i+32] := ABS(a[i+39:i+32] - tmp[i+23:i+16]) + ABS(a[i+47:i+40] - tmp[i+31:i+24]) +\
+ ABS(a[i+55:i+48] - tmp[i+39:i+32]) + ABS(a[i+63:i+56] - tmp[i+47:i+40])
+
+ dst[i+63:i+48] := ABS(a[i+39:i+32] - tmp[i+31:i+24]) + ABS(a[i+47:i+40] - tmp[i+39:i+32]) +\
+ ABS(a[i+55:i+48] - tmp[i+47:i+40]) + ABS(a[i+63:i+56] - tmp[i+55:i+48])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VDBPSADBW" form="xmm, xmm, xmm, imm8" xed="VDBPSADBW_XMMu16_MASKmskw_XMMu8_XMMu8_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_dbsad_epu8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__m128i" varname="src" etype="UI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
+ <parameter type="__m128i" varname="b" etype="UI8"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Compute the sum of absolute differences (SADs) of quadruplets of unsigned 8-bit integers in "a" compared to those in "b", and store the 16-bit results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+	Four SADs are performed on four 8-bit quadruplets for each 64-bit lane. The first two SADs use the lower 8-bit quadruplet of the lane from "a", and the last two SADs use the upper 8-bit quadruplet of the lane from "a". Quadruplets from "b" are selected according to the control in "imm8", and each SAD in each 64-bit lane uses the selected quadruplet at 8-bit offsets.</description>
+ <operation>
+tmp.dword[0] := b.dword[ imm8[1:0] ]
+tmp.dword[1] := b.dword[ imm8[3:2] ]
+tmp.dword[2] := b.dword[ imm8[5:4] ]
+tmp.dword[3] := b.dword[ imm8[7:6] ]
+FOR j := 0 to 1
+ i := j*64
+ tmp_dst[i+15:i] := ABS(a[i+7:i] - tmp[i+7:i]) + ABS(a[i+15:i+8] - tmp[i+15:i+8]) +\
+ ABS(a[i+23:i+16] - tmp[i+23:i+16]) + ABS(a[i+31:i+24] - tmp[i+31:i+24])
+
+ tmp_dst[i+31:i+16] := ABS(a[i+7:i] - tmp[i+15:i+8]) + ABS(a[i+15:i+8] - tmp[i+23:i+16]) +\
+ ABS(a[i+23:i+16] - tmp[i+31:i+24]) + ABS(a[i+31:i+24] - tmp[i+39:i+32])
+
+ tmp_dst[i+47:i+32] := ABS(a[i+39:i+32] - tmp[i+23:i+16]) + ABS(a[i+47:i+40] - tmp[i+31:i+24]) +\
+ ABS(a[i+55:i+48] - tmp[i+39:i+32]) + ABS(a[i+63:i+56] - tmp[i+47:i+40])
+
+ tmp_dst[i+63:i+48] := ABS(a[i+39:i+32] - tmp[i+31:i+24]) + ABS(a[i+47:i+40] - tmp[i+39:i+32]) +\
+ ABS(a[i+55:i+48] - tmp[i+47:i+40]) + ABS(a[i+63:i+56] - tmp[i+55:i+48])
+ENDFOR
+FOR j := 0 to 7
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := tmp_dst[i+15:i]
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VDBPSADBW" form="xmm {k}, xmm, xmm, imm8" xed="VDBPSADBW_XMMu16_MASKmskw_XMMu8_XMMu8_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_dbsad_epu8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
+ <parameter type="__m128i" varname="b" etype="UI8"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Compute the sum of absolute differences (SADs) of quadruplets of unsigned 8-bit integers in "a" compared to those in "b", and store the 16-bit results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+	Four SADs are performed on four 8-bit quadruplets for each 64-bit lane. The first two SADs use the lower 8-bit quadruplet of the lane from "a", and the last two SADs use the upper 8-bit quadruplet of the lane from "a". Quadruplets from "b" are selected according to the control in "imm8", and each SAD in each 64-bit lane uses the selected quadruplet at 8-bit offsets.</description>
+ <operation>
+tmp.dword[0] := b.dword[ imm8[1:0] ]
+tmp.dword[1] := b.dword[ imm8[3:2] ]
+tmp.dword[2] := b.dword[ imm8[5:4] ]
+tmp.dword[3] := b.dword[ imm8[7:6] ]
+FOR j := 0 to 1
+ i := j*64
+ tmp_dst[i+15:i] := ABS(a[i+7:i] - tmp[i+7:i]) + ABS(a[i+15:i+8] - tmp[i+15:i+8]) +\
+ ABS(a[i+23:i+16] - tmp[i+23:i+16]) + ABS(a[i+31:i+24] - tmp[i+31:i+24])
+
+ tmp_dst[i+31:i+16] := ABS(a[i+7:i] - tmp[i+15:i+8]) + ABS(a[i+15:i+8] - tmp[i+23:i+16]) +\
+ ABS(a[i+23:i+16] - tmp[i+31:i+24]) + ABS(a[i+31:i+24] - tmp[i+39:i+32])
+
+ tmp_dst[i+47:i+32] := ABS(a[i+39:i+32] - tmp[i+23:i+16]) + ABS(a[i+47:i+40] - tmp[i+31:i+24]) +\
+ ABS(a[i+55:i+48] - tmp[i+39:i+32]) + ABS(a[i+63:i+56] - tmp[i+47:i+40])
+
+ tmp_dst[i+63:i+48] := ABS(a[i+39:i+32] - tmp[i+31:i+24]) + ABS(a[i+47:i+40] - tmp[i+39:i+32]) +\
+ ABS(a[i+55:i+48] - tmp[i+47:i+40]) + ABS(a[i+63:i+56] - tmp[i+55:i+48])
+ENDFOR
+FOR j := 0 to 7
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := tmp_dst[i+15:i]
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VDBPSADBW" form="xmm {z}, xmm, xmm, imm8" xed="VDBPSADBW_XMMu16_MASKmskw_XMMu8_XMMu8_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_loadu_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Load</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__m256i" varname="src" etype="UI16"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="void const*" varname="mem_addr" etype="UI16" memwidth="256"/>
+ <description>Load packed 16-bit integers from memory into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+ "mem_addr" does not need to be aligned on any particular boundary.</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := MEM[mem_addr+i+15:mem_addr+i]
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VMOVDQU16" form="ymm {k}, m256" xed="VMOVDQU16_YMMu16_MASKmskw_MEMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_mov_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Move</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__m256i" varname="src" etype="UI16"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI16"/>
+ <description>Move packed 16-bit integers from "a" into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := a[i+15:i]
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VMOVDQU16" form="ymm {k}, ymm" xed="VMOVDQU16_YMMu16_MASKmskw_YMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_storeu_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="mem_addr" etype="UI16" memwidth="256"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI16"/>
+ <description>Store packed 16-bit integers from "a" into memory using writemask "k".
+ "mem_addr" does not need to be aligned on any particular boundary.</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ IF k[j]
+ MEM[mem_addr+i+15:mem_addr+i] := a[i+15:i]
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VMOVDQU16" form="m256 {k}, ymm" xed="VMOVDQU16_MEMu16_MASKmskw_YMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_loadu_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Load</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="void const*" varname="mem_addr" etype="UI16" memwidth="256"/>
+ <description>Load packed 16-bit integers from memory into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+ "mem_addr" does not need to be aligned on any particular boundary.</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := MEM[mem_addr+i+15:mem_addr+i]
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VMOVDQU16" form="ymm {z}, m256" xed="VMOVDQU16_YMMu16_MASKmskw_MEMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_mov_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Move</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI16"/>
+ <description>Move packed 16-bit integers from "a" into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := a[i+15:i]
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VMOVDQU16" form="ymm {z}, ymm" xed="VMOVDQU16_YMMu16_MASKmskw_YMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_loadu_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Load</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__m512i" varname="src" etype="UI16"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="void const*" varname="mem_addr" etype="UI16" memwidth="512"/>
+ <description>Load packed 16-bit integers from memory into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+ "mem_addr" does not need to be aligned on any particular boundary.</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := MEM[mem_addr+i+15:mem_addr+i]
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VMOVDQU16" form="zmm {k}, m512" xed="VMOVDQU16_ZMMu16_MASKmskw_MEMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_mov_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Move</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__m512i" varname="src" etype="UI16"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI16"/>
+ <description>Move packed 16-bit integers from "a" into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := a[i+15:i]
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VMOVDQU16" form="zmm {k}, zmm" xed="VMOVDQU16_ZMMu16_MASKmskw_ZMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_storeu_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="mem_addr" etype="UI16" memwidth="512"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI16"/>
+ <description>Store packed 16-bit integers from "a" into memory using writemask "k".
+ "mem_addr" does not need to be aligned on any particular boundary.</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ IF k[j]
+ MEM[mem_addr+i+15:mem_addr+i] := a[i+15:i]
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VMOVDQU16" form="m512 {k}, zmm" xed="VMOVDQU16_MEMu16_MASKmskw_ZMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_loadu_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Load</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="void const*" varname="mem_addr" etype="UI16" memwidth="512"/>
+ <description>Load packed 16-bit integers from memory into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+ "mem_addr" does not need to be aligned on any particular boundary.</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := MEM[mem_addr+i+15:mem_addr+i]
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VMOVDQU16" form="zmm {z}, m512" xed="VMOVDQU16_ZMMu16_MASKmskw_MEMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_mov_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Move</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI16"/>
+ <description>Move packed 16-bit integers from "a" into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := a[i+15:i]
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VMOVDQU16" form="zmm {z}, zmm" xed="VMOVDQU16_ZMMu16_MASKmskw_ZMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_loadu_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Load</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__m128i" varname="src" etype="UI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="void const*" varname="mem_addr" etype="UI16" memwidth="128"/>
+ <description>Load packed 16-bit integers from memory into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+ "mem_addr" does not need to be aligned on any particular boundary.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := MEM[mem_addr+i+15:mem_addr+i]
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VMOVDQU16" form="xmm {k}, m128" xed="VMOVDQU16_XMMu16_MASKmskw_MEMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_mov_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Move</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__m128i" varname="src" etype="UI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <description>Move packed 16-bit integers from "a" into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := a[i+15:i]
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VMOVDQU16" form="xmm {k}, xmm" xed="VMOVDQU16_XMMu16_MASKmskw_XMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_storeu_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="mem_addr" etype="UI16" memwidth="128"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <description>Store packed 16-bit integers from "a" into memory using writemask "k".
+ "mem_addr" does not need to be aligned on any particular boundary.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ IF k[j]
+ MEM[mem_addr+i+15:mem_addr+i] := a[i+15:i]
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VMOVDQU16" form="m128 {k}, xmm" xed="VMOVDQU16_MEMu16_MASKmskw_XMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_loadu_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Load</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="void const*" varname="mem_addr" etype="UI16" memwidth="128"/>
+ <description>Load packed 16-bit integers from memory into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+ "mem_addr" does not need to be aligned on any particular boundary.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := MEM[mem_addr+i+15:mem_addr+i]
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VMOVDQU16" form="xmm {z}, m128" xed="VMOVDQU16_XMMu16_MASKmskw_MEMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_mov_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Move</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <description>Move packed 16-bit integers from "a" into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := a[i+15:i]
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VMOVDQU16" form="xmm {z}, xmm" xed="VMOVDQU16_XMMu16_MASKmskw_XMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_loadu_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Load</category>
+ <return type="__m256i" varname="dst" etype="UI8"/>
+ <parameter type="__m256i" varname="src" etype="UI8"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="void const*" varname="mem_addr" etype="UI8" memwidth="256"/>
+ <description>Load packed 8-bit integers from memory into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+ "mem_addr" does not need to be aligned on any particular boundary.</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := MEM[mem_addr+i+7:mem_addr+i]
+ ELSE
+ dst[i+7:i] := src[i+7:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VMOVDQU8" form="ymm {k}, m256" xed="VMOVDQU8_YMMu8_MASKmskw_MEMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_mov_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Move</category>
+ <return type="__m256i" varname="dst" etype="UI8"/>
+ <parameter type="__m256i" varname="src" etype="UI8"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI8"/>
+ <description>Move packed 8-bit integers from "a" into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := a[i+7:i]
+ ELSE
+ dst[i+7:i] := src[i+7:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VMOVDQU8" form="ymm {k}, ymm" xed="VMOVDQU8_YMMu8_MASKmskw_YMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_storeu_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="mem_addr" etype="UI8" memwidth="256"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI8"/>
+ <description>Store packed 8-bit integers from "a" into memory using writemask "k".
+ "mem_addr" does not need to be aligned on any particular boundary.</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*8
+ IF k[j]
+ MEM[mem_addr+i+7:mem_addr+i] := a[i+7:i]
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VMOVDQU8" form="m256 {k}, ymm" xed="VMOVDQU8_MEMu8_MASKmskw_YMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_loadu_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Load</category>
+ <return type="__m256i" varname="dst" etype="UI8"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="void const*" varname="mem_addr" etype="UI8" memwidth="256"/>
+ <description>Load packed 8-bit integers from memory into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+ "mem_addr" does not need to be aligned on any particular boundary.</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := MEM[mem_addr+i+7:mem_addr+i]
+ ELSE
+ dst[i+7:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VMOVDQU8" form="ymm {z}, m256" xed="VMOVDQU8_YMMu8_MASKmskw_MEMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_mov_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Move</category>
+ <return type="__m256i" varname="dst" etype="UI8"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI8"/>
+ <description>Move packed 8-bit integers from "a" into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := a[i+7:i]
+ ELSE
+ dst[i+7:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VMOVDQU8" form="ymm {z}, ymm" xed="VMOVDQU8_YMMu8_MASKmskw_YMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_loadu_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Load</category>
+ <return type="__m512i" varname="dst" etype="UI8"/>
+ <parameter type="__m512i" varname="src" etype="UI8"/>
+ <parameter type="__mmask64" varname="k" etype="MASK"/>
+ <parameter type="void const*" varname="mem_addr" etype="UI8" memwidth="512"/>
+ <description>Load packed 8-bit integers from memory into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+ "mem_addr" does not need to be aligned on any particular boundary.</description>
+ <operation>
+FOR j := 0 to 63
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := MEM[mem_addr+i+7:mem_addr+i]
+ ELSE
+ dst[i+7:i] := src[i+7:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VMOVDQU8" form="zmm {k}, m512" xed="VMOVDQU8_ZMMu8_MASKmskw_MEMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_mov_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Move</category>
+ <return type="__m512i" varname="dst" etype="UI8"/>
+ <parameter type="__m512i" varname="src" etype="UI8"/>
+ <parameter type="__mmask64" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI8"/>
+ <description>Move packed 8-bit integers from "a" into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 63
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := a[i+7:i]
+ ELSE
+ dst[i+7:i] := src[i+7:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VMOVDQU8" form="zmm {k}, zmm" xed="VMOVDQU8_ZMMu8_MASKmskw_ZMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_storeu_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="mem_addr" etype="UI8" memwidth="512"/>
+ <parameter type="__mmask64" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI8"/>
+ <description>Store packed 8-bit integers from "a" into memory using writemask "k".
+ "mem_addr" does not need to be aligned on any particular boundary.</description>
+ <operation>
+FOR j := 0 to 63
+ i := j*8
+ IF k[j]
+ MEM[mem_addr+i+7:mem_addr+i] := a[i+7:i]
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VMOVDQU8" form="m512 {k}, zmm" xed="VMOVDQU8_MEMu8_MASKmskw_ZMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_loadu_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Load</category>
+ <return type="__m512i" varname="dst" etype="UI8"/>
+ <parameter type="__mmask64" varname="k" etype="MASK"/>
+ <parameter type="void const*" varname="mem_addr" etype="UI8" memwidth="512"/>
+ <description>Load packed 8-bit integers from memory into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+ "mem_addr" does not need to be aligned on any particular boundary.</description>
+ <operation>
+FOR j := 0 to 63
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := MEM[mem_addr+i+7:mem_addr+i]
+ ELSE
+ dst[i+7:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VMOVDQU8" form="zmm {z}, m512" xed="VMOVDQU8_ZMMu8_MASKmskw_MEMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_mov_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Move</category>
+ <return type="__m512i" varname="dst" etype="UI8"/>
+ <parameter type="__mmask64" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI8"/>
+ <description>Move packed 8-bit integers from "a" into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 63
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := a[i+7:i]
+ ELSE
+ dst[i+7:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VMOVDQU8" form="zmm {z}, zmm" xed="VMOVDQU8_ZMMu8_MASKmskw_ZMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_loadu_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Load</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__m128i" varname="src" etype="UI8"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="void const*" varname="mem_addr" etype="UI8" memwidth="128"/>
+ <description>Load packed 8-bit integers from memory into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+ "mem_addr" does not need to be aligned on any particular boundary.</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := MEM[mem_addr+i+7:mem_addr+i]
+ ELSE
+ dst[i+7:i] := src[i+7:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VMOVDQU8" form="xmm {k}, m128" xed="VMOVDQU8_XMMu8_MASKmskw_MEMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_mov_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Move</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__m128i" varname="src" etype="UI8"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
+ <description>Move packed 8-bit integers from "a" into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := a[i+7:i]
+ ELSE
+ dst[i+7:i] := src[i+7:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VMOVDQU8" form="xmm {k}, xmm" xed="VMOVDQU8_XMMu8_MASKmskw_XMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_storeu_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="mem_addr" etype="UI8" memwidth="128"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
+ <description>Store packed 8-bit integers from "a" into memory using writemask "k".
+ "mem_addr" does not need to be aligned on any particular boundary.</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*8
+ IF k[j]
+ MEM[mem_addr+i+7:mem_addr+i] := a[i+7:i]
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VMOVDQU8" form="m128 {k}, xmm" xed="VMOVDQU8_MEMu8_MASKmskw_XMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_loadu_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Load</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="void const*" varname="mem_addr" etype="UI8" memwidth="128"/>
+ <description>Load packed 8-bit integers from memory into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+ "mem_addr" does not need to be aligned on any particular boundary.</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := MEM[mem_addr+i+7:mem_addr+i]
+ ELSE
+ dst[i+7:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VMOVDQU8" form="xmm {z}, m128" xed="VMOVDQU8_XMMu8_MASKmskw_MEMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_mov_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Move</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
+ <description>Move packed 8-bit integers from "a" into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := a[i+7:i]
+ ELSE
+ dst[i+7:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VMOVDQU8" form="xmm {z}, xmm" xed="VMOVDQU8_XMMu8_MASKmskw_XMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_abs_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI8"/>
+ <parameter type="__m256i" varname="src" etype="UI8"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI8"/>
+ <description>Compute the absolute value of packed signed 8-bit integers in "a", and store the unsigned results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := ABS(a[i+7:i])
+ ELSE
+ dst[i+7:i] := src[i+7:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPABSB" form="ymm {k}, ymm" xed="VPABSB_YMMi8_MASKmskw_YMMi8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_abs_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI8"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI8"/>
+ <description>Compute the absolute value of packed signed 8-bit integers in "a", and store the unsigned results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := ABS(a[i+7:i])
+ ELSE
+ dst[i+7:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPABSB" form="ymm {z}, ymm" xed="VPABSB_YMMi8_MASKmskw_YMMi8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_abs_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI8"/>
+ <parameter type="__m512i" varname="a" etype="SI8"/>
+ <description>Compute the absolute value of packed signed 8-bit integers in "a", and store the unsigned results in "dst".</description>
+ <operation>
+FOR j := 0 to 63
+ i := j*8
+ dst[i+7:i] := ABS(a[i+7:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPABSB" form="zmm, zmm" xed="VPABSB_ZMMi8_MASKmskw_ZMMi8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_abs_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI8"/>
+ <parameter type="__m512i" varname="src" etype="UI8"/>
+ <parameter type="__mmask64" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI8"/>
+ <description>Compute the absolute value of packed signed 8-bit integers in "a", and store the unsigned results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 63
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := ABS(a[i+7:i])
+ ELSE
+ dst[i+7:i] := src[i+7:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPABSB" form="zmm {k}, zmm" xed="VPABSB_ZMMi8_MASKmskw_ZMMi8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_abs_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI8"/>
+ <parameter type="__mmask64" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI8"/>
+ <description>Compute the absolute value of packed signed 8-bit integers in "a", and store the unsigned results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 63
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := ABS(a[i+7:i])
+ ELSE
+ dst[i+7:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPABSB" form="zmm {z}, zmm" xed="VPABSB_ZMMi8_MASKmskw_ZMMi8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_abs_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__m128i" varname="src" etype="UI8"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI8"/>
+ <description>Compute the absolute value of packed signed 8-bit integers in "a", and store the unsigned results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := ABS(a[i+7:i])
+ ELSE
+ dst[i+7:i] := src[i+7:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPABSB" form="xmm {k}, xmm" xed="VPABSB_XMMi8_MASKmskw_XMMi8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_abs_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI8"/>
+ <description>Compute the absolute value of packed signed 8-bit integers in "a", and store the unsigned results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := ABS(a[i+7:i])
+ ELSE
+ dst[i+7:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPABSB" form="xmm {z}, xmm" xed="VPABSB_XMMi8_MASKmskw_XMMi8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_abs_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__m256i" varname="src" etype="UI16"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI16"/>
+ <description>Compute the absolute value of packed signed 16-bit integers in "a", and store the unsigned results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := ABS(a[i+15:i])
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPABSW" form="ymm {k}, ymm" xed="VPABSW_YMMi16_MASKmskw_YMMi16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_abs_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI16"/>
+ <description>Compute the absolute value of packed signed 16-bit integers in "a", and store the unsigned results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := ABS(a[i+15:i])
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPABSW" form="ymm {z}, ymm" xed="VPABSW_YMMi16_MASKmskw_YMMi16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_abs_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__m512i" varname="a" etype="SI16"/>
+ <description>Compute the absolute value of packed signed 16-bit integers in "a", and store the unsigned results in "dst".</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ dst[i+15:i] := ABS(a[i+15:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPABSW" form="zmm, zmm" xed="VPABSW_ZMMi16_MASKmskw_ZMMi16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_abs_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__m512i" varname="src" etype="UI16"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI16"/>
+ <description>Compute the absolute value of packed signed 16-bit integers in "a", and store the unsigned results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := ABS(a[i+15:i])
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPABSW" form="zmm {k}, zmm" xed="VPABSW_ZMMi16_MASKmskw_ZMMi16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_abs_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI16"/>
+ <description>Compute the absolute value of packed signed 16-bit integers in "a", and store the unsigned results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := ABS(a[i+15:i])
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPABSW" form="zmm {z}, zmm" xed="VPABSW_ZMMi16_MASKmskw_ZMMi16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_abs_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__m128i" varname="src" etype="UI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI16"/>
+ <description>Compute the absolute value of packed signed 16-bit integers in "a", and store the unsigned results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := ABS(a[i+15:i])
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPABSW" form="xmm {k}, xmm" xed="VPABSW_XMMi16_MASKmskw_XMMi16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_abs_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI16"/>
+ <description>Compute the absolute value of packed signed 16-bit integers in "a", and store the unsigned results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := ABS(a[i+15:i])
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPABSW" form="xmm {z}, xmm" xed="VPABSW_XMMi16_MASKmskw_XMMi16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_packs_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Convert</category>
+ <category>Miscellaneous</category>
+ <return type="__m256i" varname="dst" etype="SI16"/>
+ <parameter type="__m256i" varname="src" etype="SI16"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI32"/>
+ <parameter type="__m256i" varname="b" etype="SI32"/>
+ <description>Convert packed signed 32-bit integers from "a" and "b" to packed 16-bit integers using signed saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+tmp_dst[15:0] := Saturate16(a[31:0])
+tmp_dst[31:16] := Saturate16(a[63:32])
+tmp_dst[47:32] := Saturate16(a[95:64])
+tmp_dst[63:48] := Saturate16(a[127:96])
+tmp_dst[79:64] := Saturate16(b[31:0])
+tmp_dst[95:80] := Saturate16(b[63:32])
+tmp_dst[111:96] := Saturate16(b[95:64])
+tmp_dst[127:112] := Saturate16(b[127:96])
+tmp_dst[143:128] := Saturate16(a[159:128])
+tmp_dst[159:144] := Saturate16(a[191:160])
+tmp_dst[175:160] := Saturate16(a[223:192])
+tmp_dst[191:176] := Saturate16(a[255:224])
+tmp_dst[207:192] := Saturate16(b[159:128])
+tmp_dst[223:208] := Saturate16(b[191:160])
+tmp_dst[239:224] := Saturate16(b[223:192])
+tmp_dst[255:240] := Saturate16(b[255:224])
+FOR j := 0 to 15
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := tmp_dst[i+15:i]
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPACKSSDW" form="ymm {k}, ymm, ymm" xed="VPACKSSDW_YMMi16_MASKmskw_YMMi32_YMMi32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_packs_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Convert</category>
+ <category>Miscellaneous</category>
+ <return type="__m256i" varname="dst" etype="SI16"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI32"/>
+ <parameter type="__m256i" varname="b" etype="SI32"/>
+ <description>Convert packed signed 32-bit integers from "a" and "b" to packed 16-bit integers using signed saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+tmp_dst[15:0] := Saturate16(a[31:0])
+tmp_dst[31:16] := Saturate16(a[63:32])
+tmp_dst[47:32] := Saturate16(a[95:64])
+tmp_dst[63:48] := Saturate16(a[127:96])
+tmp_dst[79:64] := Saturate16(b[31:0])
+tmp_dst[95:80] := Saturate16(b[63:32])
+tmp_dst[111:96] := Saturate16(b[95:64])
+tmp_dst[127:112] := Saturate16(b[127:96])
+tmp_dst[143:128] := Saturate16(a[159:128])
+tmp_dst[159:144] := Saturate16(a[191:160])
+tmp_dst[175:160] := Saturate16(a[223:192])
+tmp_dst[191:176] := Saturate16(a[255:224])
+tmp_dst[207:192] := Saturate16(b[159:128])
+tmp_dst[223:208] := Saturate16(b[191:160])
+tmp_dst[239:224] := Saturate16(b[223:192])
+tmp_dst[255:240] := Saturate16(b[255:224])
+FOR j := 0 to 15
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := tmp_dst[i+15:i]
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPACKSSDW" form="ymm {z}, ymm, ymm" xed="VPACKSSDW_YMMi16_MASKmskw_YMMi32_YMMi32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_packs_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Convert</category>
+ <category>Miscellaneous</category>
+ <return type="__m512i" varname="dst" etype="SI16"/>
+ <parameter type="__m512i" varname="src" etype="SI16"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI32"/>
+ <parameter type="__m512i" varname="b" etype="SI32"/>
+ <description>Convert packed signed 32-bit integers from "a" and "b" to packed 16-bit integers using signed saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+tmp_dst[15:0] := Saturate16(a[31:0])
+tmp_dst[31:16] := Saturate16(a[63:32])
+tmp_dst[47:32] := Saturate16(a[95:64])
+tmp_dst[63:48] := Saturate16(a[127:96])
+tmp_dst[79:64] := Saturate16(b[31:0])
+tmp_dst[95:80] := Saturate16(b[63:32])
+tmp_dst[111:96] := Saturate16(b[95:64])
+tmp_dst[127:112] := Saturate16(b[127:96])
+tmp_dst[143:128] := Saturate16(a[159:128])
+tmp_dst[159:144] := Saturate16(a[191:160])
+tmp_dst[175:160] := Saturate16(a[223:192])
+tmp_dst[191:176] := Saturate16(a[255:224])
+tmp_dst[207:192] := Saturate16(b[159:128])
+tmp_dst[223:208] := Saturate16(b[191:160])
+tmp_dst[239:224] := Saturate16(b[223:192])
+tmp_dst[255:240] := Saturate16(b[255:224])
+tmp_dst[271:256] := Saturate16(a[287:256])
+tmp_dst[287:272] := Saturate16(a[319:288])
+tmp_dst[303:288] := Saturate16(a[351:320])
+tmp_dst[319:304] := Saturate16(a[383:352])
+tmp_dst[335:320] := Saturate16(b[287:256])
+tmp_dst[351:336] := Saturate16(b[319:288])
+tmp_dst[367:352] := Saturate16(b[351:320])
+tmp_dst[383:368] := Saturate16(b[383:352])
+tmp_dst[399:384] := Saturate16(a[415:384])
+tmp_dst[415:400] := Saturate16(a[447:416])
+tmp_dst[431:416] := Saturate16(a[479:448])
+tmp_dst[447:432] := Saturate16(a[511:480])
+tmp_dst[463:448] := Saturate16(b[415:384])
+tmp_dst[479:464] := Saturate16(b[447:416])
+tmp_dst[495:480] := Saturate16(b[479:448])
+tmp_dst[511:496] := Saturate16(b[511:480])
+FOR j := 0 to 31
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := tmp_dst[i+15:i]
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPACKSSDW" form="zmm {k}, zmm, zmm" xed="VPACKSSDW_ZMMi16_MASKmskw_ZMMi32_ZMMi32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_packs_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Convert</category>
+ <category>Miscellaneous</category>
+ <return type="__m512i" varname="dst" etype="SI16"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI32"/>
+ <parameter type="__m512i" varname="b" etype="SI32"/>
+ <description>Convert packed signed 32-bit integers from "a" and "b" to packed 16-bit integers using signed saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+tmp_dst[15:0] := Saturate16(a[31:0])
+tmp_dst[31:16] := Saturate16(a[63:32])
+tmp_dst[47:32] := Saturate16(a[95:64])
+tmp_dst[63:48] := Saturate16(a[127:96])
+tmp_dst[79:64] := Saturate16(b[31:0])
+tmp_dst[95:80] := Saturate16(b[63:32])
+tmp_dst[111:96] := Saturate16(b[95:64])
+tmp_dst[127:112] := Saturate16(b[127:96])
+tmp_dst[143:128] := Saturate16(a[159:128])
+tmp_dst[159:144] := Saturate16(a[191:160])
+tmp_dst[175:160] := Saturate16(a[223:192])
+tmp_dst[191:176] := Saturate16(a[255:224])
+tmp_dst[207:192] := Saturate16(b[159:128])
+tmp_dst[223:208] := Saturate16(b[191:160])
+tmp_dst[239:224] := Saturate16(b[223:192])
+tmp_dst[255:240] := Saturate16(b[255:224])
+tmp_dst[271:256] := Saturate16(a[287:256])
+tmp_dst[287:272] := Saturate16(a[319:288])
+tmp_dst[303:288] := Saturate16(a[351:320])
+tmp_dst[319:304] := Saturate16(a[383:352])
+tmp_dst[335:320] := Saturate16(b[287:256])
+tmp_dst[351:336] := Saturate16(b[319:288])
+tmp_dst[367:352] := Saturate16(b[351:320])
+tmp_dst[383:368] := Saturate16(b[383:352])
+tmp_dst[399:384] := Saturate16(a[415:384])
+tmp_dst[415:400] := Saturate16(a[447:416])
+tmp_dst[431:416] := Saturate16(a[479:448])
+tmp_dst[447:432] := Saturate16(a[511:480])
+tmp_dst[463:448] := Saturate16(b[415:384])
+tmp_dst[479:464] := Saturate16(b[447:416])
+tmp_dst[495:480] := Saturate16(b[479:448])
+tmp_dst[511:496] := Saturate16(b[511:480])
+FOR j := 0 to 31
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := tmp_dst[i+15:i]
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPACKSSDW" form="zmm {z}, zmm, zmm" xed="VPACKSSDW_ZMMi16_MASKmskw_ZMMi32_ZMMi32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_packs_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Convert</category>
+ <category>Miscellaneous</category>
+ <return type="__m512i" varname="dst" etype="SI16"/>
+ <parameter type="__m512i" varname="a" etype="SI32"/>
+ <parameter type="__m512i" varname="b" etype="SI32"/>
+ <description>Convert packed signed 32-bit integers from "a" and "b" to packed 16-bit integers using signed saturation, and store the results in "dst".</description>
+ <operation>
+dst[15:0] := Saturate16(a[31:0])
+dst[31:16] := Saturate16(a[63:32])
+dst[47:32] := Saturate16(a[95:64])
+dst[63:48] := Saturate16(a[127:96])
+dst[79:64] := Saturate16(b[31:0])
+dst[95:80] := Saturate16(b[63:32])
+dst[111:96] := Saturate16(b[95:64])
+dst[127:112] := Saturate16(b[127:96])
+dst[143:128] := Saturate16(a[159:128])
+dst[159:144] := Saturate16(a[191:160])
+dst[175:160] := Saturate16(a[223:192])
+dst[191:176] := Saturate16(a[255:224])
+dst[207:192] := Saturate16(b[159:128])
+dst[223:208] := Saturate16(b[191:160])
+dst[239:224] := Saturate16(b[223:192])
+dst[255:240] := Saturate16(b[255:224])
+dst[271:256] := Saturate16(a[287:256])
+dst[287:272] := Saturate16(a[319:288])
+dst[303:288] := Saturate16(a[351:320])
+dst[319:304] := Saturate16(a[383:352])
+dst[335:320] := Saturate16(b[287:256])
+dst[351:336] := Saturate16(b[319:288])
+dst[367:352] := Saturate16(b[351:320])
+dst[383:368] := Saturate16(b[383:352])
+dst[399:384] := Saturate16(a[415:384])
+dst[415:400] := Saturate16(a[447:416])
+dst[431:416] := Saturate16(a[479:448])
+dst[447:432] := Saturate16(a[511:480])
+dst[463:448] := Saturate16(b[415:384])
+dst[479:464] := Saturate16(b[447:416])
+dst[495:480] := Saturate16(b[479:448])
+dst[511:496] := Saturate16(b[511:480])
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPACKSSDW" form="zmm, zmm, zmm" xed="VPACKSSDW_ZMMi16_MASKmskw_ZMMi32_ZMMi32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_packs_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Convert</category>
+ <category>Miscellaneous</category>
+ <return type="__m128i" varname="dst" etype="SI16"/>
+ <parameter type="__m128i" varname="src" etype="SI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI32"/>
+ <parameter type="__m128i" varname="b" etype="SI32"/>
+ <description>Convert packed signed 32-bit integers from "a" and "b" to packed 16-bit integers using signed saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+tmp_dst[15:0] := Saturate16(a[31:0])
+tmp_dst[31:16] := Saturate16(a[63:32])
+tmp_dst[47:32] := Saturate16(a[95:64])
+tmp_dst[63:48] := Saturate16(a[127:96])
+tmp_dst[79:64] := Saturate16(b[31:0])
+tmp_dst[95:80] := Saturate16(b[63:32])
+tmp_dst[111:96] := Saturate16(b[95:64])
+tmp_dst[127:112] := Saturate16(b[127:96])
+FOR j := 0 to 7
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := tmp_dst[i+15:i]
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPACKSSDW" form="xmm {k}, xmm, xmm" xed="VPACKSSDW_XMMi16_MASKmskw_XMMi32_XMMi32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_packs_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Convert</category>
+ <category>Miscellaneous</category>
+ <return type="__m128i" varname="dst" etype="SI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI32"/>
+ <parameter type="__m128i" varname="b" etype="SI32"/>
+ <description>Convert packed signed 32-bit integers from "a" and "b" to packed 16-bit integers using signed saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+tmp_dst[15:0] := Saturate16(a[31:0])
+tmp_dst[31:16] := Saturate16(a[63:32])
+tmp_dst[47:32] := Saturate16(a[95:64])
+tmp_dst[63:48] := Saturate16(a[127:96])
+tmp_dst[79:64] := Saturate16(b[31:0])
+tmp_dst[95:80] := Saturate16(b[63:32])
+tmp_dst[111:96] := Saturate16(b[95:64])
+tmp_dst[127:112] := Saturate16(b[127:96])
+FOR j := 0 to 7
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := tmp_dst[i+15:i]
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPACKSSDW" form="xmm {z}, xmm, xmm" xed="VPACKSSDW_XMMi16_MASKmskw_XMMi32_XMMi32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_packs_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Convert</category>
+ <category>Miscellaneous</category>
+ <return type="__m256i" varname="dst" etype="SI8"/>
+ <parameter type="__m256i" varname="src" etype="SI8"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI16"/>
+ <parameter type="__m256i" varname="b" etype="SI16"/>
+ <description>Convert packed signed 16-bit integers from "a" and "b" to packed 8-bit integers using signed saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+tmp_dst[7:0] := Saturate8(a[15:0])
+tmp_dst[15:8] := Saturate8(a[31:16])
+tmp_dst[23:16] := Saturate8(a[47:32])
+tmp_dst[31:24] := Saturate8(a[63:48])
+tmp_dst[39:32] := Saturate8(a[79:64])
+tmp_dst[47:40] := Saturate8(a[95:80])
+tmp_dst[55:48] := Saturate8(a[111:96])
+tmp_dst[63:56] := Saturate8(a[127:112])
+tmp_dst[71:64] := Saturate8(b[15:0])
+tmp_dst[79:72] := Saturate8(b[31:16])
+tmp_dst[87:80] := Saturate8(b[47:32])
+tmp_dst[95:88] := Saturate8(b[63:48])
+tmp_dst[103:96] := Saturate8(b[79:64])
+tmp_dst[111:104] := Saturate8(b[95:80])
+tmp_dst[119:112] := Saturate8(b[111:96])
+tmp_dst[127:120] := Saturate8(b[127:112])
+tmp_dst[135:128] := Saturate8(a[143:128])
+tmp_dst[143:136] := Saturate8(a[159:144])
+tmp_dst[151:144] := Saturate8(a[175:160])
+tmp_dst[159:152] := Saturate8(a[191:176])
+tmp_dst[167:160] := Saturate8(a[207:192])
+tmp_dst[175:168] := Saturate8(a[223:208])
+tmp_dst[183:176] := Saturate8(a[239:224])
+tmp_dst[191:184] := Saturate8(a[255:240])
+tmp_dst[199:192] := Saturate8(b[143:128])
+tmp_dst[207:200] := Saturate8(b[159:144])
+tmp_dst[215:208] := Saturate8(b[175:160])
+tmp_dst[223:216] := Saturate8(b[191:176])
+tmp_dst[231:224] := Saturate8(b[207:192])
+tmp_dst[239:232] := Saturate8(b[223:208])
+tmp_dst[247:240] := Saturate8(b[239:224])
+tmp_dst[255:248] := Saturate8(b[255:240])
+FOR j := 0 to 31
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := tmp_dst[i+7:i]
+ ELSE
+ dst[i+7:i] := src[i+7:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPACKSSWB" form="ymm {k}, ymm, ymm" xed="VPACKSSWB_YMMi8_MASKmskw_YMMi16_YMMi16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_packs_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Convert</category>
+ <category>Miscellaneous</category>
+ <return type="__m256i" varname="dst" etype="SI8"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI16"/>
+ <parameter type="__m256i" varname="b" etype="SI16"/>
+ <description>Convert packed signed 16-bit integers from "a" and "b" to packed 8-bit integers using signed saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+tmp_dst[7:0] := Saturate8(a[15:0])
+tmp_dst[15:8] := Saturate8(a[31:16])
+tmp_dst[23:16] := Saturate8(a[47:32])
+tmp_dst[31:24] := Saturate8(a[63:48])
+tmp_dst[39:32] := Saturate8(a[79:64])
+tmp_dst[47:40] := Saturate8(a[95:80])
+tmp_dst[55:48] := Saturate8(a[111:96])
+tmp_dst[63:56] := Saturate8(a[127:112])
+tmp_dst[71:64] := Saturate8(b[15:0])
+tmp_dst[79:72] := Saturate8(b[31:16])
+tmp_dst[87:80] := Saturate8(b[47:32])
+tmp_dst[95:88] := Saturate8(b[63:48])
+tmp_dst[103:96] := Saturate8(b[79:64])
+tmp_dst[111:104] := Saturate8(b[95:80])
+tmp_dst[119:112] := Saturate8(b[111:96])
+tmp_dst[127:120] := Saturate8(b[127:112])
+tmp_dst[135:128] := Saturate8(a[143:128])
+tmp_dst[143:136] := Saturate8(a[159:144])
+tmp_dst[151:144] := Saturate8(a[175:160])
+tmp_dst[159:152] := Saturate8(a[191:176])
+tmp_dst[167:160] := Saturate8(a[207:192])
+tmp_dst[175:168] := Saturate8(a[223:208])
+tmp_dst[183:176] := Saturate8(a[239:224])
+tmp_dst[191:184] := Saturate8(a[255:240])
+tmp_dst[199:192] := Saturate8(b[143:128])
+tmp_dst[207:200] := Saturate8(b[159:144])
+tmp_dst[215:208] := Saturate8(b[175:160])
+tmp_dst[223:216] := Saturate8(b[191:176])
+tmp_dst[231:224] := Saturate8(b[207:192])
+tmp_dst[239:232] := Saturate8(b[223:208])
+tmp_dst[247:240] := Saturate8(b[239:224])
+tmp_dst[255:248] := Saturate8(b[255:240])
+FOR j := 0 to 31
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := tmp_dst[i+7:i]
+ ELSE
+ dst[i+7:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPACKSSWB" form="ymm {z}, ymm, ymm" xed="VPACKSSWB_YMMi8_MASKmskw_YMMi16_YMMi16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_packs_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Convert</category>
+ <category>Miscellaneous</category>
+ <return type="__m512i" varname="dst" etype="SI8"/>
+ <parameter type="__m512i" varname="src" etype="SI8"/>
+ <parameter type="__mmask64" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI16"/>
+ <parameter type="__m512i" varname="b" etype="SI16"/>
+ <description>Convert packed signed 16-bit integers from "a" and "b" to packed 8-bit integers using signed saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+tmp_dst[7:0] := Saturate8(a[15:0])
+tmp_dst[15:8] := Saturate8(a[31:16])
+tmp_dst[23:16] := Saturate8(a[47:32])
+tmp_dst[31:24] := Saturate8(a[63:48])
+tmp_dst[39:32] := Saturate8(a[79:64])
+tmp_dst[47:40] := Saturate8(a[95:80])
+tmp_dst[55:48] := Saturate8(a[111:96])
+tmp_dst[63:56] := Saturate8(a[127:112])
+tmp_dst[71:64] := Saturate8(b[15:0])
+tmp_dst[79:72] := Saturate8(b[31:16])
+tmp_dst[87:80] := Saturate8(b[47:32])
+tmp_dst[95:88] := Saturate8(b[63:48])
+tmp_dst[103:96] := Saturate8(b[79:64])
+tmp_dst[111:104] := Saturate8(b[95:80])
+tmp_dst[119:112] := Saturate8(b[111:96])
+tmp_dst[127:120] := Saturate8(b[127:112])
+tmp_dst[135:128] := Saturate8(a[143:128])
+tmp_dst[143:136] := Saturate8(a[159:144])
+tmp_dst[151:144] := Saturate8(a[175:160])
+tmp_dst[159:152] := Saturate8(a[191:176])
+tmp_dst[167:160] := Saturate8(a[207:192])
+tmp_dst[175:168] := Saturate8(a[223:208])
+tmp_dst[183:176] := Saturate8(a[239:224])
+tmp_dst[191:184] := Saturate8(a[255:240])
+tmp_dst[199:192] := Saturate8(b[143:128])
+tmp_dst[207:200] := Saturate8(b[159:144])
+tmp_dst[215:208] := Saturate8(b[175:160])
+tmp_dst[223:216] := Saturate8(b[191:176])
+tmp_dst[231:224] := Saturate8(b[207:192])
+tmp_dst[239:232] := Saturate8(b[223:208])
+tmp_dst[247:240] := Saturate8(b[239:224])
+tmp_dst[255:248] := Saturate8(b[255:240])
+tmp_dst[263:256] := Saturate8(a[271:256])
+tmp_dst[271:264] := Saturate8(a[287:272])
+tmp_dst[279:272] := Saturate8(a[303:288])
+tmp_dst[287:280] := Saturate8(a[319:304])
+tmp_dst[295:288] := Saturate8(a[335:320])
+tmp_dst[303:296] := Saturate8(a[351:336])
+tmp_dst[311:304] := Saturate8(a[367:352])
+tmp_dst[319:312] := Saturate8(a[383:368])
+tmp_dst[327:320] := Saturate8(b[271:256])
+tmp_dst[335:328] := Saturate8(b[287:272])
+tmp_dst[343:336] := Saturate8(b[303:288])
+tmp_dst[351:344] := Saturate8(b[319:304])
+tmp_dst[359:352] := Saturate8(b[335:320])
+tmp_dst[367:360] := Saturate8(b[351:336])
+tmp_dst[375:368] := Saturate8(b[367:352])
+tmp_dst[383:376] := Saturate8(b[383:368])
+tmp_dst[391:384] := Saturate8(a[399:384])
+tmp_dst[399:392] := Saturate8(a[415:400])
+tmp_dst[407:400] := Saturate8(a[431:416])
+tmp_dst[415:408] := Saturate8(a[447:432])
+tmp_dst[423:416] := Saturate8(a[463:448])
+tmp_dst[431:424] := Saturate8(a[479:464])
+tmp_dst[439:432] := Saturate8(a[495:480])
+tmp_dst[447:440] := Saturate8(a[511:496])
+tmp_dst[455:448] := Saturate8(b[399:384])
+tmp_dst[463:456] := Saturate8(b[415:400])
+tmp_dst[471:464] := Saturate8(b[431:416])
+tmp_dst[479:472] := Saturate8(b[447:432])
+tmp_dst[487:480] := Saturate8(b[463:448])
+tmp_dst[495:488] := Saturate8(b[479:464])
+tmp_dst[503:496] := Saturate8(b[495:480])
+tmp_dst[511:504] := Saturate8(b[511:496])
+FOR j := 0 to 63
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := tmp_dst[i+7:i]
+ ELSE
+ dst[i+7:i] := src[i+7:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPACKSSWB" form="zmm {k}, zmm, zmm" xed="VPACKSSWB_ZMMi8_MASKmskw_ZMMi16_ZMMi16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_packs_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Convert</category>
+ <category>Miscellaneous</category>
+ <return type="__m512i" varname="dst" etype="SI8"/>
+ <parameter type="__mmask64" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI16"/>
+ <parameter type="__m512i" varname="b" etype="SI16"/>
+ <description>Convert packed signed 16-bit integers from "a" and "b" to packed 8-bit integers using signed saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+tmp_dst[7:0] := Saturate8(a[15:0])
+tmp_dst[15:8] := Saturate8(a[31:16])
+tmp_dst[23:16] := Saturate8(a[47:32])
+tmp_dst[31:24] := Saturate8(a[63:48])
+tmp_dst[39:32] := Saturate8(a[79:64])
+tmp_dst[47:40] := Saturate8(a[95:80])
+tmp_dst[55:48] := Saturate8(a[111:96])
+tmp_dst[63:56] := Saturate8(a[127:112])
+tmp_dst[71:64] := Saturate8(b[15:0])
+tmp_dst[79:72] := Saturate8(b[31:16])
+tmp_dst[87:80] := Saturate8(b[47:32])
+tmp_dst[95:88] := Saturate8(b[63:48])
+tmp_dst[103:96] := Saturate8(b[79:64])
+tmp_dst[111:104] := Saturate8(b[95:80])
+tmp_dst[119:112] := Saturate8(b[111:96])
+tmp_dst[127:120] := Saturate8(b[127:112])
+tmp_dst[135:128] := Saturate8(a[143:128])
+tmp_dst[143:136] := Saturate8(a[159:144])
+tmp_dst[151:144] := Saturate8(a[175:160])
+tmp_dst[159:152] := Saturate8(a[191:176])
+tmp_dst[167:160] := Saturate8(a[207:192])
+tmp_dst[175:168] := Saturate8(a[223:208])
+tmp_dst[183:176] := Saturate8(a[239:224])
+tmp_dst[191:184] := Saturate8(a[255:240])
+tmp_dst[199:192] := Saturate8(b[143:128])
+tmp_dst[207:200] := Saturate8(b[159:144])
+tmp_dst[215:208] := Saturate8(b[175:160])
+tmp_dst[223:216] := Saturate8(b[191:176])
+tmp_dst[231:224] := Saturate8(b[207:192])
+tmp_dst[239:232] := Saturate8(b[223:208])
+tmp_dst[247:240] := Saturate8(b[239:224])
+tmp_dst[255:248] := Saturate8(b[255:240])
+tmp_dst[263:256] := Saturate8(a[271:256])
+tmp_dst[271:264] := Saturate8(a[287:272])
+tmp_dst[279:272] := Saturate8(a[303:288])
+tmp_dst[287:280] := Saturate8(a[319:304])
+tmp_dst[295:288] := Saturate8(a[335:320])
+tmp_dst[303:296] := Saturate8(a[351:336])
+tmp_dst[311:304] := Saturate8(a[367:352])
+tmp_dst[319:312] := Saturate8(a[383:368])
+tmp_dst[327:320] := Saturate8(b[271:256])
+tmp_dst[335:328] := Saturate8(b[287:272])
+tmp_dst[343:336] := Saturate8(b[303:288])
+tmp_dst[351:344] := Saturate8(b[319:304])
+tmp_dst[359:352] := Saturate8(b[335:320])
+tmp_dst[367:360] := Saturate8(b[351:336])
+tmp_dst[375:368] := Saturate8(b[367:352])
+tmp_dst[383:376] := Saturate8(b[383:368])
+tmp_dst[391:384] := Saturate8(a[399:384])
+tmp_dst[399:392] := Saturate8(a[415:400])
+tmp_dst[407:400] := Saturate8(a[431:416])
+tmp_dst[415:408] := Saturate8(a[447:432])
+tmp_dst[423:416] := Saturate8(a[463:448])
+tmp_dst[431:424] := Saturate8(a[479:464])
+tmp_dst[439:432] := Saturate8(a[495:480])
+tmp_dst[447:440] := Saturate8(a[511:496])
+tmp_dst[455:448] := Saturate8(b[399:384])
+tmp_dst[463:456] := Saturate8(b[415:400])
+tmp_dst[471:464] := Saturate8(b[431:416])
+tmp_dst[479:472] := Saturate8(b[447:432])
+tmp_dst[487:480] := Saturate8(b[463:448])
+tmp_dst[495:488] := Saturate8(b[479:464])
+tmp_dst[503:496] := Saturate8(b[495:480])
+tmp_dst[511:504] := Saturate8(b[511:496])
+FOR j := 0 to 63
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := tmp_dst[i+7:i]
+ ELSE
+ dst[i+7:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPACKSSWB" form="zmm {z}, zmm, zmm" xed="VPACKSSWB_ZMMi8_MASKmskw_ZMMi16_ZMMi16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_packs_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Convert</category>
+ <category>Miscellaneous</category>
+ <return type="__m512i" varname="dst" etype="SI8"/>
+ <parameter type="__m512i" varname="a" etype="SI16"/>
+ <parameter type="__m512i" varname="b" etype="SI16"/>
+ <description>Convert packed signed 16-bit integers from "a" and "b" to packed 8-bit integers using signed saturation, and store the results in "dst".</description>
+ <operation>
+dst[7:0] := Saturate8(a[15:0])
+dst[15:8] := Saturate8(a[31:16])
+dst[23:16] := Saturate8(a[47:32])
+dst[31:24] := Saturate8(a[63:48])
+dst[39:32] := Saturate8(a[79:64])
+dst[47:40] := Saturate8(a[95:80])
+dst[55:48] := Saturate8(a[111:96])
+dst[63:56] := Saturate8(a[127:112])
+dst[71:64] := Saturate8(b[15:0])
+dst[79:72] := Saturate8(b[31:16])
+dst[87:80] := Saturate8(b[47:32])
+dst[95:88] := Saturate8(b[63:48])
+dst[103:96] := Saturate8(b[79:64])
+dst[111:104] := Saturate8(b[95:80])
+dst[119:112] := Saturate8(b[111:96])
+dst[127:120] := Saturate8(b[127:112])
+dst[135:128] := Saturate8(a[143:128])
+dst[143:136] := Saturate8(a[159:144])
+dst[151:144] := Saturate8(a[175:160])
+dst[159:152] := Saturate8(a[191:176])
+dst[167:160] := Saturate8(a[207:192])
+dst[175:168] := Saturate8(a[223:208])
+dst[183:176] := Saturate8(a[239:224])
+dst[191:184] := Saturate8(a[255:240])
+dst[199:192] := Saturate8(b[143:128])
+dst[207:200] := Saturate8(b[159:144])
+dst[215:208] := Saturate8(b[175:160])
+dst[223:216] := Saturate8(b[191:176])
+dst[231:224] := Saturate8(b[207:192])
+dst[239:232] := Saturate8(b[223:208])
+dst[247:240] := Saturate8(b[239:224])
+dst[255:248] := Saturate8(b[255:240])
+dst[263:256] := Saturate8(a[271:256])
+dst[271:264] := Saturate8(a[287:272])
+dst[279:272] := Saturate8(a[303:288])
+dst[287:280] := Saturate8(a[319:304])
+dst[295:288] := Saturate8(a[335:320])
+dst[303:296] := Saturate8(a[351:336])
+dst[311:304] := Saturate8(a[367:352])
+dst[319:312] := Saturate8(a[383:368])
+dst[327:320] := Saturate8(b[271:256])
+dst[335:328] := Saturate8(b[287:272])
+dst[343:336] := Saturate8(b[303:288])
+dst[351:344] := Saturate8(b[319:304])
+dst[359:352] := Saturate8(b[335:320])
+dst[367:360] := Saturate8(b[351:336])
+dst[375:368] := Saturate8(b[367:352])
+dst[383:376] := Saturate8(b[383:368])
+dst[391:384] := Saturate8(a[399:384])
+dst[399:392] := Saturate8(a[415:400])
+dst[407:400] := Saturate8(a[431:416])
+dst[415:408] := Saturate8(a[447:432])
+dst[423:416] := Saturate8(a[463:448])
+dst[431:424] := Saturate8(a[479:464])
+dst[439:432] := Saturate8(a[495:480])
+dst[447:440] := Saturate8(a[511:496])
+dst[455:448] := Saturate8(b[399:384])
+dst[463:456] := Saturate8(b[415:400])
+dst[471:464] := Saturate8(b[431:416])
+dst[479:472] := Saturate8(b[447:432])
+dst[487:480] := Saturate8(b[463:448])
+dst[495:488] := Saturate8(b[479:464])
+dst[503:496] := Saturate8(b[495:480])
+dst[511:504] := Saturate8(b[511:496])
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPACKSSWB" form="zmm, zmm, zmm" xed="VPACKSSWB_ZMMi8_MASKmskw_ZMMi16_ZMMi16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_packs_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Convert</category>
+ <category>Miscellaneous</category>
+ <return type="__m128i" varname="dst" etype="SI8"/>
+ <parameter type="__m128i" varname="src" etype="SI8"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI16"/>
+ <parameter type="__m128i" varname="b" etype="SI16"/>
+ <description>Convert packed signed 16-bit integers from "a" and "b" to packed 8-bit integers using signed saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+tmp_dst[7:0] := Saturate8(a[15:0])
+tmp_dst[15:8] := Saturate8(a[31:16])
+tmp_dst[23:16] := Saturate8(a[47:32])
+tmp_dst[31:24] := Saturate8(a[63:48])
+tmp_dst[39:32] := Saturate8(a[79:64])
+tmp_dst[47:40] := Saturate8(a[95:80])
+tmp_dst[55:48] := Saturate8(a[111:96])
+tmp_dst[63:56] := Saturate8(a[127:112])
+tmp_dst[71:64] := Saturate8(b[15:0])
+tmp_dst[79:72] := Saturate8(b[31:16])
+tmp_dst[87:80] := Saturate8(b[47:32])
+tmp_dst[95:88] := Saturate8(b[63:48])
+tmp_dst[103:96] := Saturate8(b[79:64])
+tmp_dst[111:104] := Saturate8(b[95:80])
+tmp_dst[119:112] := Saturate8(b[111:96])
+tmp_dst[127:120] := Saturate8(b[127:112])
+FOR j := 0 to 15
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := tmp_dst[i+7:i]
+ ELSE
+ dst[i+7:i] := src[i+7:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPACKSSWB" form="xmm {k}, xmm, xmm" xed="VPACKSSWB_XMMi8_MASKmskw_XMMi16_XMMi16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_packs_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Convert</category>
+ <category>Miscellaneous</category>
+ <return type="__m128i" varname="dst" etype="SI8"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI16"/>
+ <parameter type="__m128i" varname="b" etype="SI16"/>
+ <description>Convert packed signed 16-bit integers from "a" and "b" to packed 8-bit integers using signed saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+tmp_dst[7:0] := Saturate8(a[15:0])
+tmp_dst[15:8] := Saturate8(a[31:16])
+tmp_dst[23:16] := Saturate8(a[47:32])
+tmp_dst[31:24] := Saturate8(a[63:48])
+tmp_dst[39:32] := Saturate8(a[79:64])
+tmp_dst[47:40] := Saturate8(a[95:80])
+tmp_dst[55:48] := Saturate8(a[111:96])
+tmp_dst[63:56] := Saturate8(a[127:112])
+tmp_dst[71:64] := Saturate8(b[15:0])
+tmp_dst[79:72] := Saturate8(b[31:16])
+tmp_dst[87:80] := Saturate8(b[47:32])
+tmp_dst[95:88] := Saturate8(b[63:48])
+tmp_dst[103:96] := Saturate8(b[79:64])
+tmp_dst[111:104] := Saturate8(b[95:80])
+tmp_dst[119:112] := Saturate8(b[111:96])
+tmp_dst[127:120] := Saturate8(b[127:112])
+FOR j := 0 to 15
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := tmp_dst[i+7:i]
+ ELSE
+ dst[i+7:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPACKSSWB" form="xmm {z}, xmm, xmm" xed="VPACKSSWB_XMMi8_MASKmskw_XMMi16_XMMi16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_packus_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Convert</category>
+ <category>Miscellaneous</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__m256i" varname="src" etype="UI16"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI32"/>
+ <parameter type="__m256i" varname="b" etype="SI32"/>
+ <description>Convert packed signed 32-bit integers from "a" and "b" to packed 16-bit integers using unsigned saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+tmp_dst[15:0] := SaturateU16(a[31:0])
+tmp_dst[31:16] := SaturateU16(a[63:32])
+tmp_dst[47:32] := SaturateU16(a[95:64])
+tmp_dst[63:48] := SaturateU16(a[127:96])
+tmp_dst[79:64] := SaturateU16(b[31:0])
+tmp_dst[95:80] := SaturateU16(b[63:32])
+tmp_dst[111:96] := SaturateU16(b[95:64])
+tmp_dst[127:112] := SaturateU16(b[127:96])
+tmp_dst[143:128] := SaturateU16(a[159:128])
+tmp_dst[159:144] := SaturateU16(a[191:160])
+tmp_dst[175:160] := SaturateU16(a[223:192])
+tmp_dst[191:176] := SaturateU16(a[255:224])
+tmp_dst[207:192] := SaturateU16(b[159:128])
+tmp_dst[223:208] := SaturateU16(b[191:160])
+tmp_dst[239:224] := SaturateU16(b[223:192])
+tmp_dst[255:240] := SaturateU16(b[255:224])
+FOR j := 0 to 15
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := tmp_dst[i+15:i]
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPACKUSDW" form="ymm {k}, ymm, ymm" xed="VPACKUSDW_YMMu16_MASKmskw_YMMu32_YMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_packus_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Convert</category>
+ <category>Miscellaneous</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI32"/>
+ <parameter type="__m256i" varname="b" etype="SI32"/>
+ <description>Convert packed signed 32-bit integers from "a" and "b" to packed 16-bit integers using unsigned saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+tmp_dst[15:0] := SaturateU16(a[31:0])
+tmp_dst[31:16] := SaturateU16(a[63:32])
+tmp_dst[47:32] := SaturateU16(a[95:64])
+tmp_dst[63:48] := SaturateU16(a[127:96])
+tmp_dst[79:64] := SaturateU16(b[31:0])
+tmp_dst[95:80] := SaturateU16(b[63:32])
+tmp_dst[111:96] := SaturateU16(b[95:64])
+tmp_dst[127:112] := SaturateU16(b[127:96])
+tmp_dst[143:128] := SaturateU16(a[159:128])
+tmp_dst[159:144] := SaturateU16(a[191:160])
+tmp_dst[175:160] := SaturateU16(a[223:192])
+tmp_dst[191:176] := SaturateU16(a[255:224])
+tmp_dst[207:192] := SaturateU16(b[159:128])
+tmp_dst[223:208] := SaturateU16(b[191:160])
+tmp_dst[239:224] := SaturateU16(b[223:192])
+tmp_dst[255:240] := SaturateU16(b[255:224])
+FOR j := 0 to 15
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := tmp_dst[i+15:i]
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPACKUSDW" form="ymm {z}, ymm, ymm" xed="VPACKUSDW_YMMu16_MASKmskw_YMMu32_YMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_packus_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Convert</category>
+ <category>Miscellaneous</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__m512i" varname="src" etype="UI16"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI32"/>
+ <parameter type="__m512i" varname="b" etype="SI32"/>
+ <description>Convert packed signed 32-bit integers from "a" and "b" to packed 16-bit integers using unsigned saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+tmp_dst[15:0] := SaturateU16(a[31:0])
+tmp_dst[31:16] := SaturateU16(a[63:32])
+tmp_dst[47:32] := SaturateU16(a[95:64])
+tmp_dst[63:48] := SaturateU16(a[127:96])
+tmp_dst[79:64] := SaturateU16(b[31:0])
+tmp_dst[95:80] := SaturateU16(b[63:32])
+tmp_dst[111:96] := SaturateU16(b[95:64])
+tmp_dst[127:112] := SaturateU16(b[127:96])
+tmp_dst[143:128] := SaturateU16(a[159:128])
+tmp_dst[159:144] := SaturateU16(a[191:160])
+tmp_dst[175:160] := SaturateU16(a[223:192])
+tmp_dst[191:176] := SaturateU16(a[255:224])
+tmp_dst[207:192] := SaturateU16(b[159:128])
+tmp_dst[223:208] := SaturateU16(b[191:160])
+tmp_dst[239:224] := SaturateU16(b[223:192])
+tmp_dst[255:240] := SaturateU16(b[255:224])
+tmp_dst[271:256] := SaturateU16(a[287:256])
+tmp_dst[287:272] := SaturateU16(a[319:288])
+tmp_dst[303:288] := SaturateU16(a[351:320])
+tmp_dst[319:304] := SaturateU16(a[383:352])
+tmp_dst[335:320] := SaturateU16(b[287:256])
+tmp_dst[351:336] := SaturateU16(b[319:288])
+tmp_dst[367:352] := SaturateU16(b[351:320])
+tmp_dst[383:368] := SaturateU16(b[383:352])
+tmp_dst[399:384] := SaturateU16(a[415:384])
+tmp_dst[415:400] := SaturateU16(a[447:416])
+tmp_dst[431:416] := SaturateU16(a[479:448])
+tmp_dst[447:432] := SaturateU16(a[511:480])
+tmp_dst[463:448] := SaturateU16(b[415:384])
+tmp_dst[479:464] := SaturateU16(b[447:416])
+tmp_dst[495:480] := SaturateU16(b[479:448])
+tmp_dst[511:496] := SaturateU16(b[511:480])
+FOR j := 0 to 31
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := tmp_dst[i+15:i]
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPACKUSDW" form="zmm {k}, zmm, zmm" xed="VPACKUSDW_ZMMu16_MASKmskw_ZMMu32_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_packus_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Convert</category>
+ <category>Miscellaneous</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI32"/>
+ <parameter type="__m512i" varname="b" etype="SI32"/>
+ <description>Convert packed signed 32-bit integers from "a" and "b" to packed 16-bit integers using unsigned saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+tmp_dst[15:0] := SaturateU16(a[31:0])
+tmp_dst[31:16] := SaturateU16(a[63:32])
+tmp_dst[47:32] := SaturateU16(a[95:64])
+tmp_dst[63:48] := SaturateU16(a[127:96])
+tmp_dst[79:64] := SaturateU16(b[31:0])
+tmp_dst[95:80] := SaturateU16(b[63:32])
+tmp_dst[111:96] := SaturateU16(b[95:64])
+tmp_dst[127:112] := SaturateU16(b[127:96])
+tmp_dst[143:128] := SaturateU16(a[159:128])
+tmp_dst[159:144] := SaturateU16(a[191:160])
+tmp_dst[175:160] := SaturateU16(a[223:192])
+tmp_dst[191:176] := SaturateU16(a[255:224])
+tmp_dst[207:192] := SaturateU16(b[159:128])
+tmp_dst[223:208] := SaturateU16(b[191:160])
+tmp_dst[239:224] := SaturateU16(b[223:192])
+tmp_dst[255:240] := SaturateU16(b[255:224])
+tmp_dst[271:256] := SaturateU16(a[287:256])
+tmp_dst[287:272] := SaturateU16(a[319:288])
+tmp_dst[303:288] := SaturateU16(a[351:320])
+tmp_dst[319:304] := SaturateU16(a[383:352])
+tmp_dst[335:320] := SaturateU16(b[287:256])
+tmp_dst[351:336] := SaturateU16(b[319:288])
+tmp_dst[367:352] := SaturateU16(b[351:320])
+tmp_dst[383:368] := SaturateU16(b[383:352])
+tmp_dst[399:384] := SaturateU16(a[415:384])
+tmp_dst[415:400] := SaturateU16(a[447:416])
+tmp_dst[431:416] := SaturateU16(a[479:448])
+tmp_dst[447:432] := SaturateU16(a[511:480])
+tmp_dst[463:448] := SaturateU16(b[415:384])
+tmp_dst[479:464] := SaturateU16(b[447:416])
+tmp_dst[495:480] := SaturateU16(b[479:448])
+tmp_dst[511:496] := SaturateU16(b[511:480])
+FOR j := 0 to 31
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := tmp_dst[i+15:i]
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPACKUSDW" form="zmm {z}, zmm, zmm" xed="VPACKUSDW_ZMMu16_MASKmskw_ZMMu32_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_packus_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Convert</category>
+ <category>Miscellaneous</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__m512i" varname="a" etype="SI32"/>
+ <parameter type="__m512i" varname="b" etype="SI32"/>
+ <description>Convert packed signed 32-bit integers from "a" and "b" to packed 16-bit integers using unsigned saturation, and store the results in "dst".</description>
+ <operation>
+dst[15:0] := SaturateU16(a[31:0])
+dst[31:16] := SaturateU16(a[63:32])
+dst[47:32] := SaturateU16(a[95:64])
+dst[63:48] := SaturateU16(a[127:96])
+dst[79:64] := SaturateU16(b[31:0])
+dst[95:80] := SaturateU16(b[63:32])
+dst[111:96] := SaturateU16(b[95:64])
+dst[127:112] := SaturateU16(b[127:96])
+dst[143:128] := SaturateU16(a[159:128])
+dst[159:144] := SaturateU16(a[191:160])
+dst[175:160] := SaturateU16(a[223:192])
+dst[191:176] := SaturateU16(a[255:224])
+dst[207:192] := SaturateU16(b[159:128])
+dst[223:208] := SaturateU16(b[191:160])
+dst[239:224] := SaturateU16(b[223:192])
+dst[255:240] := SaturateU16(b[255:224])
+dst[271:256] := SaturateU16(a[287:256])
+dst[287:272] := SaturateU16(a[319:288])
+dst[303:288] := SaturateU16(a[351:320])
+dst[319:304] := SaturateU16(a[383:352])
+dst[335:320] := SaturateU16(b[287:256])
+dst[351:336] := SaturateU16(b[319:288])
+dst[367:352] := SaturateU16(b[351:320])
+dst[383:368] := SaturateU16(b[383:352])
+dst[399:384] := SaturateU16(a[415:384])
+dst[415:400] := SaturateU16(a[447:416])
+dst[431:416] := SaturateU16(a[479:448])
+dst[447:432] := SaturateU16(a[511:480])
+dst[463:448] := SaturateU16(b[415:384])
+dst[479:464] := SaturateU16(b[447:416])
+dst[495:480] := SaturateU16(b[479:448])
+dst[511:496] := SaturateU16(b[511:480])
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPACKUSDW" form="zmm, zmm, zmm" xed="VPACKUSDW_ZMMu16_MASKmskw_ZMMu32_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_packus_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Convert</category>
+ <category>Miscellaneous</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__m128i" varname="src" etype="UI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI32"/>
+ <parameter type="__m128i" varname="b" etype="SI32"/>
+ <description>Convert packed signed 32-bit integers from "a" and "b" to packed 16-bit integers using unsigned saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+tmp_dst[15:0] := SaturateU16(a[31:0])
+tmp_dst[31:16] := SaturateU16(a[63:32])
+tmp_dst[47:32] := SaturateU16(a[95:64])
+tmp_dst[63:48] := SaturateU16(a[127:96])
+tmp_dst[79:64] := SaturateU16(b[31:0])
+tmp_dst[95:80] := SaturateU16(b[63:32])
+tmp_dst[111:96] := SaturateU16(b[95:64])
+tmp_dst[127:112] := SaturateU16(b[127:96])
+FOR j := 0 to 7
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := tmp_dst[i+15:i]
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPACKUSDW" form="xmm {k}, xmm, xmm" xed="VPACKUSDW_XMMu16_MASKmskw_XMMu32_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_packus_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Convert</category>
+ <category>Miscellaneous</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI32"/>
+ <parameter type="__m128i" varname="b" etype="SI32"/>
+ <description>Convert packed signed 32-bit integers from "a" and "b" to packed 16-bit integers using unsigned saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+tmp_dst[15:0] := SaturateU16(a[31:0])
+tmp_dst[31:16] := SaturateU16(a[63:32])
+tmp_dst[47:32] := SaturateU16(a[95:64])
+tmp_dst[63:48] := SaturateU16(a[127:96])
+tmp_dst[79:64] := SaturateU16(b[31:0])
+tmp_dst[95:80] := SaturateU16(b[63:32])
+tmp_dst[111:96] := SaturateU16(b[95:64])
+tmp_dst[127:112] := SaturateU16(b[127:96])
+FOR j := 0 to 7
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := tmp_dst[i+15:i]
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPACKUSDW" form="xmm {z}, xmm, xmm" xed="VPACKUSDW_XMMu16_MASKmskw_XMMu32_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_packus_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Convert</category>
+ <category>Miscellaneous</category>
+ <return type="__m256i" varname="dst" etype="UI8"/>
+ <parameter type="__m256i" varname="src" etype="UI8"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI16"/>
+ <parameter type="__m256i" varname="b" etype="SI16"/>
+ <description>Convert packed signed 16-bit integers from "a" and "b" to packed 8-bit integers using unsigned saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+tmp_dst[7:0] := SaturateU8(a[15:0])
+tmp_dst[15:8] := SaturateU8(a[31:16])
+tmp_dst[23:16] := SaturateU8(a[47:32])
+tmp_dst[31:24] := SaturateU8(a[63:48])
+tmp_dst[39:32] := SaturateU8(a[79:64])
+tmp_dst[47:40] := SaturateU8(a[95:80])
+tmp_dst[55:48] := SaturateU8(a[111:96])
+tmp_dst[63:56] := SaturateU8(a[127:112])
+tmp_dst[71:64] := SaturateU8(b[15:0])
+tmp_dst[79:72] := SaturateU8(b[31:16])
+tmp_dst[87:80] := SaturateU8(b[47:32])
+tmp_dst[95:88] := SaturateU8(b[63:48])
+tmp_dst[103:96] := SaturateU8(b[79:64])
+tmp_dst[111:104] := SaturateU8(b[95:80])
+tmp_dst[119:112] := SaturateU8(b[111:96])
+tmp_dst[127:120] := SaturateU8(b[127:112])
+tmp_dst[135:128] := SaturateU8(a[143:128])
+tmp_dst[143:136] := SaturateU8(a[159:144])
+tmp_dst[151:144] := SaturateU8(a[175:160])
+tmp_dst[159:152] := SaturateU8(a[191:176])
+tmp_dst[167:160] := SaturateU8(a[207:192])
+tmp_dst[175:168] := SaturateU8(a[223:208])
+tmp_dst[183:176] := SaturateU8(a[239:224])
+tmp_dst[191:184] := SaturateU8(a[255:240])
+tmp_dst[199:192] := SaturateU8(b[143:128])
+tmp_dst[207:200] := SaturateU8(b[159:144])
+tmp_dst[215:208] := SaturateU8(b[175:160])
+tmp_dst[223:216] := SaturateU8(b[191:176])
+tmp_dst[231:224] := SaturateU8(b[207:192])
+tmp_dst[239:232] := SaturateU8(b[223:208])
+tmp_dst[247:240] := SaturateU8(b[239:224])
+tmp_dst[255:248] := SaturateU8(b[255:240])
+FOR j := 0 to 31
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := tmp_dst[i+7:i]
+ ELSE
+ dst[i+7:i] := src[i+7:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPACKUSWB" form="ymm {k}, ymm, ymm" xed="VPACKUSWB_YMMu8_MASKmskw_YMMu16_YMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_packus_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Convert</category>
+ <category>Miscellaneous</category>
+ <return type="__m256i" varname="dst" etype="UI8"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI16"/>
+ <parameter type="__m256i" varname="b" etype="SI16"/>
+ <description>Convert packed signed 16-bit integers from "a" and "b" to packed 8-bit integers using unsigned saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+tmp_dst[7:0] := SaturateU8(a[15:0])
+tmp_dst[15:8] := SaturateU8(a[31:16])
+tmp_dst[23:16] := SaturateU8(a[47:32])
+tmp_dst[31:24] := SaturateU8(a[63:48])
+tmp_dst[39:32] := SaturateU8(a[79:64])
+tmp_dst[47:40] := SaturateU8(a[95:80])
+tmp_dst[55:48] := SaturateU8(a[111:96])
+tmp_dst[63:56] := SaturateU8(a[127:112])
+tmp_dst[71:64] := SaturateU8(b[15:0])
+tmp_dst[79:72] := SaturateU8(b[31:16])
+tmp_dst[87:80] := SaturateU8(b[47:32])
+tmp_dst[95:88] := SaturateU8(b[63:48])
+tmp_dst[103:96] := SaturateU8(b[79:64])
+tmp_dst[111:104] := SaturateU8(b[95:80])
+tmp_dst[119:112] := SaturateU8(b[111:96])
+tmp_dst[127:120] := SaturateU8(b[127:112])
+tmp_dst[135:128] := SaturateU8(a[143:128])
+tmp_dst[143:136] := SaturateU8(a[159:144])
+tmp_dst[151:144] := SaturateU8(a[175:160])
+tmp_dst[159:152] := SaturateU8(a[191:176])
+tmp_dst[167:160] := SaturateU8(a[207:192])
+tmp_dst[175:168] := SaturateU8(a[223:208])
+tmp_dst[183:176] := SaturateU8(a[239:224])
+tmp_dst[191:184] := SaturateU8(a[255:240])
+tmp_dst[199:192] := SaturateU8(b[143:128])
+tmp_dst[207:200] := SaturateU8(b[159:144])
+tmp_dst[215:208] := SaturateU8(b[175:160])
+tmp_dst[223:216] := SaturateU8(b[191:176])
+tmp_dst[231:224] := SaturateU8(b[207:192])
+tmp_dst[239:232] := SaturateU8(b[223:208])
+tmp_dst[247:240] := SaturateU8(b[239:224])
+tmp_dst[255:248] := SaturateU8(b[255:240])
+FOR j := 0 to 31
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := tmp_dst[i+7:i]
+ ELSE
+ dst[i+7:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPACKUSWB" form="ymm {z}, ymm, ymm" xed="VPACKUSWB_YMMu8_MASKmskw_YMMu16_YMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_packus_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Convert</category>
+ <category>Miscellaneous</category>
+ <return type="__m512i" varname="dst" etype="UI8"/>
+ <parameter type="__m512i" varname="src" etype="UI8"/>
+ <parameter type="__mmask64" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI16"/>
+ <parameter type="__m512i" varname="b" etype="SI16"/>
+ <description>Convert packed signed 16-bit integers from "a" and "b" to packed 8-bit integers using unsigned saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+tmp_dst[7:0] := SaturateU8(a[15:0])
+tmp_dst[15:8] := SaturateU8(a[31:16])
+tmp_dst[23:16] := SaturateU8(a[47:32])
+tmp_dst[31:24] := SaturateU8(a[63:48])
+tmp_dst[39:32] := SaturateU8(a[79:64])
+tmp_dst[47:40] := SaturateU8(a[95:80])
+tmp_dst[55:48] := SaturateU8(a[111:96])
+tmp_dst[63:56] := SaturateU8(a[127:112])
+tmp_dst[71:64] := SaturateU8(b[15:0])
+tmp_dst[79:72] := SaturateU8(b[31:16])
+tmp_dst[87:80] := SaturateU8(b[47:32])
+tmp_dst[95:88] := SaturateU8(b[63:48])
+tmp_dst[103:96] := SaturateU8(b[79:64])
+tmp_dst[111:104] := SaturateU8(b[95:80])
+tmp_dst[119:112] := SaturateU8(b[111:96])
+tmp_dst[127:120] := SaturateU8(b[127:112])
+tmp_dst[135:128] := SaturateU8(a[143:128])
+tmp_dst[143:136] := SaturateU8(a[159:144])
+tmp_dst[151:144] := SaturateU8(a[175:160])
+tmp_dst[159:152] := SaturateU8(a[191:176])
+tmp_dst[167:160] := SaturateU8(a[207:192])
+tmp_dst[175:168] := SaturateU8(a[223:208])
+tmp_dst[183:176] := SaturateU8(a[239:224])
+tmp_dst[191:184] := SaturateU8(a[255:240])
+tmp_dst[199:192] := SaturateU8(b[143:128])
+tmp_dst[207:200] := SaturateU8(b[159:144])
+tmp_dst[215:208] := SaturateU8(b[175:160])
+tmp_dst[223:216] := SaturateU8(b[191:176])
+tmp_dst[231:224] := SaturateU8(b[207:192])
+tmp_dst[239:232] := SaturateU8(b[223:208])
+tmp_dst[247:240] := SaturateU8(b[239:224])
+tmp_dst[255:248] := SaturateU8(b[255:240])
+tmp_dst[263:256] := SaturateU8(a[271:256])
+tmp_dst[271:264] := SaturateU8(a[287:272])
+tmp_dst[279:272] := SaturateU8(a[303:288])
+tmp_dst[287:280] := SaturateU8(a[319:304])
+tmp_dst[295:288] := SaturateU8(a[335:320])
+tmp_dst[303:296] := SaturateU8(a[351:336])
+tmp_dst[311:304] := SaturateU8(a[367:352])
+tmp_dst[319:312] := SaturateU8(a[383:368])
+tmp_dst[327:320] := SaturateU8(b[271:256])
+tmp_dst[335:328] := SaturateU8(b[287:272])
+tmp_dst[343:336] := SaturateU8(b[303:288])
+tmp_dst[351:344] := SaturateU8(b[319:304])
+tmp_dst[359:352] := SaturateU8(b[335:320])
+tmp_dst[367:360] := SaturateU8(b[351:336])
+tmp_dst[375:368] := SaturateU8(b[367:352])
+tmp_dst[383:376] := SaturateU8(b[383:368])
+tmp_dst[391:384] := SaturateU8(a[399:384])
+tmp_dst[399:392] := SaturateU8(a[415:400])
+tmp_dst[407:400] := SaturateU8(a[431:416])
+tmp_dst[415:408] := SaturateU8(a[447:432])
+tmp_dst[423:416] := SaturateU8(a[463:448])
+tmp_dst[431:424] := SaturateU8(a[479:464])
+tmp_dst[439:432] := SaturateU8(a[495:480])
+tmp_dst[447:440] := SaturateU8(a[511:496])
+tmp_dst[455:448] := SaturateU8(b[399:384])
+tmp_dst[463:456] := SaturateU8(b[415:400])
+tmp_dst[471:464] := SaturateU8(b[431:416])
+tmp_dst[479:472] := SaturateU8(b[447:432])
+tmp_dst[487:480] := SaturateU8(b[463:448])
+tmp_dst[495:488] := SaturateU8(b[479:464])
+tmp_dst[503:496] := SaturateU8(b[495:480])
+tmp_dst[511:504] := SaturateU8(b[511:496])
+FOR j := 0 to 63
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := tmp_dst[i+7:i]
+ ELSE
+ dst[i+7:i] := src[i+7:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPACKUSWB" form="zmm {k}, zmm, zmm" xed="VPACKUSWB_ZMMu8_MASKmskw_ZMMu16_ZMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_packus_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Convert</category>
+ <category>Miscellaneous</category>
+ <return type="__m512i" varname="dst" etype="UI8"/>
+ <parameter type="__mmask64" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI16"/>
+ <parameter type="__m512i" varname="b" etype="SI16"/>
+ <description>Convert packed signed 16-bit integers from "a" and "b" to packed 8-bit integers using unsigned saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+tmp_dst[7:0] := SaturateU8(a[15:0])
+tmp_dst[15:8] := SaturateU8(a[31:16])
+tmp_dst[23:16] := SaturateU8(a[47:32])
+tmp_dst[31:24] := SaturateU8(a[63:48])
+tmp_dst[39:32] := SaturateU8(a[79:64])
+tmp_dst[47:40] := SaturateU8(a[95:80])
+tmp_dst[55:48] := SaturateU8(a[111:96])
+tmp_dst[63:56] := SaturateU8(a[127:112])
+tmp_dst[71:64] := SaturateU8(b[15:0])
+tmp_dst[79:72] := SaturateU8(b[31:16])
+tmp_dst[87:80] := SaturateU8(b[47:32])
+tmp_dst[95:88] := SaturateU8(b[63:48])
+tmp_dst[103:96] := SaturateU8(b[79:64])
+tmp_dst[111:104] := SaturateU8(b[95:80])
+tmp_dst[119:112] := SaturateU8(b[111:96])
+tmp_dst[127:120] := SaturateU8(b[127:112])
+tmp_dst[135:128] := SaturateU8(a[143:128])
+tmp_dst[143:136] := SaturateU8(a[159:144])
+tmp_dst[151:144] := SaturateU8(a[175:160])
+tmp_dst[159:152] := SaturateU8(a[191:176])
+tmp_dst[167:160] := SaturateU8(a[207:192])
+tmp_dst[175:168] := SaturateU8(a[223:208])
+tmp_dst[183:176] := SaturateU8(a[239:224])
+tmp_dst[191:184] := SaturateU8(a[255:240])
+tmp_dst[199:192] := SaturateU8(b[143:128])
+tmp_dst[207:200] := SaturateU8(b[159:144])
+tmp_dst[215:208] := SaturateU8(b[175:160])
+tmp_dst[223:216] := SaturateU8(b[191:176])
+tmp_dst[231:224] := SaturateU8(b[207:192])
+tmp_dst[239:232] := SaturateU8(b[223:208])
+tmp_dst[247:240] := SaturateU8(b[239:224])
+tmp_dst[255:248] := SaturateU8(b[255:240])
+tmp_dst[263:256] := SaturateU8(a[271:256])
+tmp_dst[271:264] := SaturateU8(a[287:272])
+tmp_dst[279:272] := SaturateU8(a[303:288])
+tmp_dst[287:280] := SaturateU8(a[319:304])
+tmp_dst[295:288] := SaturateU8(a[335:320])
+tmp_dst[303:296] := SaturateU8(a[351:336])
+tmp_dst[311:304] := SaturateU8(a[367:352])
+tmp_dst[319:312] := SaturateU8(a[383:368])
+tmp_dst[327:320] := SaturateU8(b[271:256])
+tmp_dst[335:328] := SaturateU8(b[287:272])
+tmp_dst[343:336] := SaturateU8(b[303:288])
+tmp_dst[351:344] := SaturateU8(b[319:304])
+tmp_dst[359:352] := SaturateU8(b[335:320])
+tmp_dst[367:360] := SaturateU8(b[351:336])
+tmp_dst[375:368] := SaturateU8(b[367:352])
+tmp_dst[383:376] := SaturateU8(b[383:368])
+tmp_dst[391:384] := SaturateU8(a[399:384])
+tmp_dst[399:392] := SaturateU8(a[415:400])
+tmp_dst[407:400] := SaturateU8(a[431:416])
+tmp_dst[415:408] := SaturateU8(a[447:432])
+tmp_dst[423:416] := SaturateU8(a[463:448])
+tmp_dst[431:424] := SaturateU8(a[479:464])
+tmp_dst[439:432] := SaturateU8(a[495:480])
+tmp_dst[447:440] := SaturateU8(a[511:496])
+tmp_dst[455:448] := SaturateU8(b[399:384])
+tmp_dst[463:456] := SaturateU8(b[415:400])
+tmp_dst[471:464] := SaturateU8(b[431:416])
+tmp_dst[479:472] := SaturateU8(b[447:432])
+tmp_dst[487:480] := SaturateU8(b[463:448])
+tmp_dst[495:488] := SaturateU8(b[479:464])
+tmp_dst[503:496] := SaturateU8(b[495:480])
+tmp_dst[511:504] := SaturateU8(b[511:496])
+FOR j := 0 to 63
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := tmp_dst[i+7:i]
+ ELSE
+ dst[i+7:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPACKUSWB" form="zmm {z}, zmm, zmm" xed="VPACKUSWB_ZMMu8_MASKmskw_ZMMu16_ZMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_packus_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Convert</category>
+ <category>Miscellaneous</category>
+ <return type="__m512i" varname="dst" etype="UI8"/>
+ <parameter type="__m512i" varname="a" etype="SI16"/>
+ <parameter type="__m512i" varname="b" etype="SI16"/>
+ <description>Convert packed signed 16-bit integers from "a" and "b" to packed 8-bit integers using unsigned saturation, and store the results in "dst".</description>
+ <operation>
+dst[7:0] := SaturateU8(a[15:0])
+dst[15:8] := SaturateU8(a[31:16])
+dst[23:16] := SaturateU8(a[47:32])
+dst[31:24] := SaturateU8(a[63:48])
+dst[39:32] := SaturateU8(a[79:64])
+dst[47:40] := SaturateU8(a[95:80])
+dst[55:48] := SaturateU8(a[111:96])
+dst[63:56] := SaturateU8(a[127:112])
+dst[71:64] := SaturateU8(b[15:0])
+dst[79:72] := SaturateU8(b[31:16])
+dst[87:80] := SaturateU8(b[47:32])
+dst[95:88] := SaturateU8(b[63:48])
+dst[103:96] := SaturateU8(b[79:64])
+dst[111:104] := SaturateU8(b[95:80])
+dst[119:112] := SaturateU8(b[111:96])
+dst[127:120] := SaturateU8(b[127:112])
+dst[135:128] := SaturateU8(a[143:128])
+dst[143:136] := SaturateU8(a[159:144])
+dst[151:144] := SaturateU8(a[175:160])
+dst[159:152] := SaturateU8(a[191:176])
+dst[167:160] := SaturateU8(a[207:192])
+dst[175:168] := SaturateU8(a[223:208])
+dst[183:176] := SaturateU8(a[239:224])
+dst[191:184] := SaturateU8(a[255:240])
+dst[199:192] := SaturateU8(b[143:128])
+dst[207:200] := SaturateU8(b[159:144])
+dst[215:208] := SaturateU8(b[175:160])
+dst[223:216] := SaturateU8(b[191:176])
+dst[231:224] := SaturateU8(b[207:192])
+dst[239:232] := SaturateU8(b[223:208])
+dst[247:240] := SaturateU8(b[239:224])
+dst[255:248] := SaturateU8(b[255:240])
+dst[263:256] := SaturateU8(a[271:256])
+dst[271:264] := SaturateU8(a[287:272])
+dst[279:272] := SaturateU8(a[303:288])
+dst[287:280] := SaturateU8(a[319:304])
+dst[295:288] := SaturateU8(a[335:320])
+dst[303:296] := SaturateU8(a[351:336])
+dst[311:304] := SaturateU8(a[367:352])
+dst[319:312] := SaturateU8(a[383:368])
+dst[327:320] := SaturateU8(b[271:256])
+dst[335:328] := SaturateU8(b[287:272])
+dst[343:336] := SaturateU8(b[303:288])
+dst[351:344] := SaturateU8(b[319:304])
+dst[359:352] := SaturateU8(b[335:320])
+dst[367:360] := SaturateU8(b[351:336])
+dst[375:368] := SaturateU8(b[367:352])
+dst[383:376] := SaturateU8(b[383:368])
+dst[391:384] := SaturateU8(a[399:384])
+dst[399:392] := SaturateU8(a[415:400])
+dst[407:400] := SaturateU8(a[431:416])
+dst[415:408] := SaturateU8(a[447:432])
+dst[423:416] := SaturateU8(a[463:448])
+dst[431:424] := SaturateU8(a[479:464])
+dst[439:432] := SaturateU8(a[495:480])
+dst[447:440] := SaturateU8(a[511:496])
+dst[455:448] := SaturateU8(b[399:384])
+dst[463:456] := SaturateU8(b[415:400])
+dst[471:464] := SaturateU8(b[431:416])
+dst[479:472] := SaturateU8(b[447:432])
+dst[487:480] := SaturateU8(b[463:448])
+dst[495:488] := SaturateU8(b[479:464])
+dst[503:496] := SaturateU8(b[495:480])
+dst[511:504] := SaturateU8(b[511:496])
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPACKUSWB" form="zmm, zmm, zmm" xed="VPACKUSWB_ZMMu8_MASKmskw_ZMMu16_ZMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_packus_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Convert</category>
+ <category>Miscellaneous</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__m128i" varname="src" etype="UI8"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI16"/>
+ <parameter type="__m128i" varname="b" etype="SI16"/>
+ <description>Convert packed signed 16-bit integers from "a" and "b" to packed 8-bit integers using unsigned saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+tmp_dst[7:0] := SaturateU8(a[15:0])
+tmp_dst[15:8] := SaturateU8(a[31:16])
+tmp_dst[23:16] := SaturateU8(a[47:32])
+tmp_dst[31:24] := SaturateU8(a[63:48])
+tmp_dst[39:32] := SaturateU8(a[79:64])
+tmp_dst[47:40] := SaturateU8(a[95:80])
+tmp_dst[55:48] := SaturateU8(a[111:96])
+tmp_dst[63:56] := SaturateU8(a[127:112])
+tmp_dst[71:64] := SaturateU8(b[15:0])
+tmp_dst[79:72] := SaturateU8(b[31:16])
+tmp_dst[87:80] := SaturateU8(b[47:32])
+tmp_dst[95:88] := SaturateU8(b[63:48])
+tmp_dst[103:96] := SaturateU8(b[79:64])
+tmp_dst[111:104] := SaturateU8(b[95:80])
+tmp_dst[119:112] := SaturateU8(b[111:96])
+tmp_dst[127:120] := SaturateU8(b[127:112])
+FOR j := 0 to 15
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := tmp_dst[i+7:i]
+ ELSE
+ dst[i+7:i] := src[i+7:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPACKUSWB" form="xmm {k}, xmm, xmm" xed="VPACKUSWB_XMMu8_MASKmskw_XMMu16_XMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_packus_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Convert</category>
+ <category>Miscellaneous</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI16"/>
+ <parameter type="__m128i" varname="b" etype="SI16"/>
+ <description>Convert packed signed 16-bit integers from "a" and "b" to packed 8-bit integers using unsigned saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+tmp_dst[7:0] := SaturateU8(a[15:0])
+tmp_dst[15:8] := SaturateU8(a[31:16])
+tmp_dst[23:16] := SaturateU8(a[47:32])
+tmp_dst[31:24] := SaturateU8(a[63:48])
+tmp_dst[39:32] := SaturateU8(a[79:64])
+tmp_dst[47:40] := SaturateU8(a[95:80])
+tmp_dst[55:48] := SaturateU8(a[111:96])
+tmp_dst[63:56] := SaturateU8(a[127:112])
+tmp_dst[71:64] := SaturateU8(b[15:0])
+tmp_dst[79:72] := SaturateU8(b[31:16])
+tmp_dst[87:80] := SaturateU8(b[47:32])
+tmp_dst[95:88] := SaturateU8(b[63:48])
+tmp_dst[103:96] := SaturateU8(b[79:64])
+tmp_dst[111:104] := SaturateU8(b[95:80])
+tmp_dst[119:112] := SaturateU8(b[111:96])
+tmp_dst[127:120] := SaturateU8(b[127:112])
+FOR j := 0 to 15
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := tmp_dst[i+7:i]
+ ELSE
+ dst[i+7:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPACKUSWB" form="xmm {z}, xmm, xmm" xed="VPACKUSWB_XMMu8_MASKmskw_XMMu16_XMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_add_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI8"/>
+ <parameter type="__m256i" varname="src" etype="UI8"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI8"/>
+ <parameter type="__m256i" varname="b" etype="UI8"/>
+ <description>Add packed 8-bit integers in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := a[i+7:i] + b[i+7:i]
+ ELSE
+ dst[i+7:i] := src[i+7:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPADDB" form="ymm {k}, ymm, ymm" xed="VPADDB_YMMu8_MASKmskw_YMMu8_YMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_add_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI8"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI8"/>
+ <parameter type="__m256i" varname="b" etype="UI8"/>
+ <description>Add packed 8-bit integers in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := a[i+7:i] + b[i+7:i]
+ ELSE
+ dst[i+7:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPADDB" form="ymm {z}, ymm, ymm" xed="VPADDB_YMMu8_MASKmskw_YMMu8_YMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_add_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI8"/>
+ <parameter type="__m512i" varname="a" etype="UI8"/>
+ <parameter type="__m512i" varname="b" etype="UI8"/>
+ <description>Add packed 8-bit integers in "a" and "b", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 63
+ i := j*8
+ dst[i+7:i] := a[i+7:i] + b[i+7:i]
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPADDB" form="zmm, zmm, zmm" xed="VPADDB_ZMMu8_MASKmskw_ZMMu8_ZMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_add_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI8"/>
+ <parameter type="__m512i" varname="src" etype="UI8"/>
+ <parameter type="__mmask64" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI8"/>
+ <parameter type="__m512i" varname="b" etype="UI8"/>
+ <description>Add packed 8-bit integers in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 63
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := a[i+7:i] + b[i+7:i]
+ ELSE
+ dst[i+7:i] := src[i+7:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPADDB" form="zmm {k}, zmm, zmm" xed="VPADDB_ZMMu8_MASKmskw_ZMMu8_ZMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_add_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI8"/>
+ <parameter type="__mmask64" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI8"/>
+ <parameter type="__m512i" varname="b" etype="UI8"/>
+ <description>Add packed 8-bit integers in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 63
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := a[i+7:i] + b[i+7:i]
+ ELSE
+ dst[i+7:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPADDB" form="zmm {z}, zmm, zmm" xed="VPADDB_ZMMu8_MASKmskw_ZMMu8_ZMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_add_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__m128i" varname="src" etype="UI8"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
+ <parameter type="__m128i" varname="b" etype="UI8"/>
+ <description>Add packed 8-bit integers in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := a[i+7:i] + b[i+7:i]
+ ELSE
+ dst[i+7:i] := src[i+7:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPADDB" form="xmm {k}, xmm, xmm" xed="VPADDB_XMMu8_MASKmskw_XMMu8_XMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_add_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
+ <parameter type="__m128i" varname="b" etype="UI8"/>
+ <description>Add packed 8-bit integers in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := a[i+7:i] + b[i+7:i]
+ ELSE
+ dst[i+7:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPADDB" form="xmm {z}, xmm, xmm" xed="VPADDB_XMMu8_MASKmskw_XMMu8_XMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_adds_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI8"/>
+ <parameter type="__m256i" varname="src" etype="UI8"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI8"/>
+ <parameter type="__m256i" varname="b" etype="SI8"/>
+ <description>Add packed signed 8-bit integers in "a" and "b" using saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := Saturate8( a[i+7:i] + b[i+7:i] )
+ ELSE
+ dst[i+7:i] := src[i+7:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPADDSB" form="ymm {k}, ymm, ymm" xed="VPADDSB_YMMi8_MASKmskw_YMMi8_YMMi8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_adds_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI8"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI8"/>
+ <parameter type="__m256i" varname="b" etype="SI8"/>
+ <description>Add packed signed 8-bit integers in "a" and "b" using saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := Saturate8( a[i+7:i] + b[i+7:i] )
+ ELSE
+ dst[i+7:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPADDSB" form="ymm {z}, ymm, ymm" xed="VPADDSB_YMMi8_MASKmskw_YMMi8_YMMi8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_adds_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI8"/>
+ <parameter type="__m512i" varname="a" etype="SI8"/>
+ <parameter type="__m512i" varname="b" etype="SI8"/>
+ <description>Add packed signed 8-bit integers in "a" and "b" using saturation, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 63
+ i := j*8
+ dst[i+7:i] := Saturate8( a[i+7:i] + b[i+7:i] )
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPADDSB" form="zmm, zmm, zmm" xed="VPADDSB_ZMMi8_MASKmskw_ZMMi8_ZMMi8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_adds_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI8"/>
+ <parameter type="__m512i" varname="src" etype="UI8"/>
+ <parameter type="__mmask64" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI8"/>
+ <parameter type="__m512i" varname="b" etype="SI8"/>
+ <description>Add packed signed 8-bit integers in "a" and "b" using saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 63
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := Saturate8( a[i+7:i] + b[i+7:i] )
+ ELSE
+ dst[i+7:i] := src[i+7:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPADDSB" form="zmm {k}, zmm, zmm" xed="VPADDSB_ZMMi8_MASKmskw_ZMMi8_ZMMi8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_adds_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI8"/>
+ <parameter type="__mmask64" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI8"/>
+ <parameter type="__m512i" varname="b" etype="SI8"/>
+ <description>Add packed signed 8-bit integers in "a" and "b" using saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 63
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := Saturate8( a[i+7:i] + b[i+7:i] )
+ ELSE
+ dst[i+7:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPADDSB" form="zmm {z}, zmm, zmm" xed="VPADDSB_ZMMi8_MASKmskw_ZMMi8_ZMMi8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_adds_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__m128i" varname="src" etype="UI8"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI8"/>
+ <parameter type="__m128i" varname="b" etype="SI8"/>
+ <description>Add packed signed 8-bit integers in "a" and "b" using saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := Saturate8( a[i+7:i] + b[i+7:i] )
+ ELSE
+ dst[i+7:i] := src[i+7:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPADDSB" form="xmm {k}, xmm, xmm" xed="VPADDSB_XMMi8_MASKmskw_XMMi8_XMMi8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_adds_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI8"/>
+ <parameter type="__m128i" varname="b" etype="SI8"/>
+ <description>Add packed signed 8-bit integers in "a" and "b" using saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := Saturate8( a[i+7:i] + b[i+7:i] )
+ ELSE
+ dst[i+7:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPADDSB" form="xmm {z}, xmm, xmm" xed="VPADDSB_XMMi8_MASKmskw_XMMi8_XMMi8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_adds_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__m256i" varname="src" etype="UI16"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI16"/>
+ <parameter type="__m256i" varname="b" etype="SI16"/>
+ <description>Add packed signed 16-bit integers in "a" and "b" using saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := Saturate16( a[i+15:i] + b[i+15:i] )
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPADDSW" form="ymm {k}, ymm, ymm" xed="VPADDSW_YMMi16_MASKmskw_YMMi16_YMMi16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_adds_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI16"/>
+ <parameter type="__m256i" varname="b" etype="SI16"/>
+ <description>Add packed signed 16-bit integers in "a" and "b" using saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := Saturate16( a[i+15:i] + b[i+15:i] )
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPADDSW" form="ymm {z}, ymm, ymm" xed="VPADDSW_YMMi16_MASKmskw_YMMi16_YMMi16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_adds_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__m512i" varname="a" etype="SI16"/>
+ <parameter type="__m512i" varname="b" etype="SI16"/>
+ <description>Add packed signed 16-bit integers in "a" and "b" using saturation, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ dst[i+15:i] := Saturate16( a[i+15:i] + b[i+15:i] )
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPADDSW" form="zmm, zmm, zmm" xed="VPADDSW_ZMMi16_MASKmskw_ZMMi16_ZMMi16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_adds_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__m512i" varname="src" etype="UI16"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI16"/>
+ <parameter type="__m512i" varname="b" etype="SI16"/>
+ <description>Add packed signed 16-bit integers in "a" and "b" using saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := Saturate16( a[i+15:i] + b[i+15:i] )
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPADDSW" form="zmm {k}, zmm, zmm" xed="VPADDSW_ZMMi16_MASKmskw_ZMMi16_ZMMi16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_adds_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI16"/>
+ <parameter type="__m512i" varname="b" etype="SI16"/>
+ <description>Add packed signed 16-bit integers in "a" and "b" using saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := Saturate16( a[i+15:i] + b[i+15:i] )
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPADDSW" form="zmm {z}, zmm, zmm" xed="VPADDSW_ZMMi16_MASKmskw_ZMMi16_ZMMi16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_adds_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__m128i" varname="src" etype="UI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI16"/>
+ <parameter type="__m128i" varname="b" etype="SI16"/>
+ <description>Add packed signed 16-bit integers in "a" and "b" using saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := Saturate16( a[i+15:i] + b[i+15:i] )
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPADDSW" form="xmm {k}, xmm, xmm" xed="VPADDSW_XMMi16_MASKmskw_XMMi16_XMMi16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_adds_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI16"/>
+ <parameter type="__m128i" varname="b" etype="SI16"/>
+ <description>Add packed signed 16-bit integers in "a" and "b" using saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := Saturate16( a[i+15:i] + b[i+15:i] )
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPADDSW" form="xmm {z}, xmm, xmm" xed="VPADDSW_XMMi16_MASKmskw_XMMi16_XMMi16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_adds_epu8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI8"/>
+ <parameter type="__m256i" varname="src" etype="UI8"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI8"/>
+ <parameter type="__m256i" varname="b" etype="UI8"/>
+ <description>Add packed unsigned 8-bit integers in "a" and "b" using saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := SaturateU8( a[i+7:i] + b[i+7:i] )
+ ELSE
+ dst[i+7:i] := src[i+7:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPADDUSB" form="ymm {k}, ymm, ymm" xed="VPADDUSB_YMMu8_MASKmskw_YMMu8_YMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_adds_epu8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI8"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI8"/>
+ <parameter type="__m256i" varname="b" etype="UI8"/>
+ <description>Add packed unsigned 8-bit integers in "a" and "b" using saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := SaturateU8( a[i+7:i] + b[i+7:i] )
+ ELSE
+ dst[i+7:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPADDUSB" form="ymm {z}, ymm, ymm" xed="VPADDUSB_YMMu8_MASKmskw_YMMu8_YMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_adds_epu8">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI8"/>
+ <parameter type="__m512i" varname="a" etype="UI8"/>
+ <parameter type="__m512i" varname="b" etype="UI8"/>
+ <description>Add packed unsigned 8-bit integers in "a" and "b" using saturation, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 63
+ i := j*8
+ dst[i+7:i] := SaturateU8( a[i+7:i] + b[i+7:i] )
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPADDUSB" form="zmm, zmm, zmm" xed="VPADDUSB_ZMMu8_MASKmskw_ZMMu8_ZMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_adds_epu8">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI8"/>
+ <parameter type="__m512i" varname="src" etype="UI8"/>
+ <parameter type="__mmask64" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI8"/>
+ <parameter type="__m512i" varname="b" etype="UI8"/>
+ <description>Add packed unsigned 8-bit integers in "a" and "b" using saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 63
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := SaturateU8( a[i+7:i] + b[i+7:i] )
+ ELSE
+ dst[i+7:i] := src[i+7:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPADDUSB" form="zmm {k}, zmm, zmm" xed="VPADDUSB_ZMMu8_MASKmskw_ZMMu8_ZMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_adds_epu8">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI8"/>
+ <parameter type="__mmask64" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI8"/>
+ <parameter type="__m512i" varname="b" etype="UI8"/>
+ <description>Add packed unsigned 8-bit integers in "a" and "b" using saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 63
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := SaturateU8( a[i+7:i] + b[i+7:i] )
+ ELSE
+ dst[i+7:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPADDUSB" form="zmm {z}, zmm, zmm" xed="VPADDUSB_ZMMu8_MASKmskw_ZMMu8_ZMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_adds_epu8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__m128i" varname="src" etype="UI8"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
+ <parameter type="__m128i" varname="b" etype="UI8"/>
+ <description>Add packed unsigned 8-bit integers in "a" and "b" using saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := SaturateU8( a[i+7:i] + b[i+7:i] )
+ ELSE
+ dst[i+7:i] := src[i+7:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPADDUSB" form="xmm {k}, xmm, xmm" xed="VPADDUSB_XMMu8_MASKmskw_XMMu8_XMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_adds_epu8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
+ <parameter type="__m128i" varname="b" etype="UI8"/>
+ <description>Add packed unsigned 8-bit integers in "a" and "b" using saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := SaturateU8( a[i+7:i] + b[i+7:i] )
+ ELSE
+ dst[i+7:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPADDUSB" form="xmm {z}, xmm, xmm" xed="VPADDUSB_XMMu8_MASKmskw_XMMu8_XMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_adds_epu16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__m256i" varname="src" etype="UI16"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI16"/>
+ <parameter type="__m256i" varname="b" etype="UI16"/>
+ <description>Add packed unsigned 16-bit integers in "a" and "b" using saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := SaturateU16( a[i+15:i] + b[i+15:i] )
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPADDUSW" form="ymm {k}, ymm, ymm" xed="VPADDUSW_YMMu16_MASKmskw_YMMu16_YMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_adds_epu16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI16"/>
+ <parameter type="__m256i" varname="b" etype="UI16"/>
+ <description>Add packed unsigned 16-bit integers in "a" and "b" using saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := SaturateU16( a[i+15:i] + b[i+15:i] )
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPADDUSW" form="ymm {z}, ymm, ymm" xed="VPADDUSW_YMMu16_MASKmskw_YMMu16_YMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_adds_epu16">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__m512i" varname="a" etype="UI16"/>
+ <parameter type="__m512i" varname="b" etype="UI16"/>
+ <description>Add packed unsigned 16-bit integers in "a" and "b" using saturation, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ dst[i+15:i] := SaturateU16( a[i+15:i] + b[i+15:i] )
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPADDUSW" form="zmm, zmm, zmm" xed="VPADDUSW_ZMMu16_MASKmskw_ZMMu16_ZMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_adds_epu16">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__m512i" varname="src" etype="UI16"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI16"/>
+ <parameter type="__m512i" varname="b" etype="UI16"/>
+ <description>Add packed unsigned 16-bit integers in "a" and "b" using saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := SaturateU16( a[i+15:i] + b[i+15:i] )
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPADDUSW" form="zmm {k}, zmm, zmm" xed="VPADDUSW_ZMMu16_MASKmskw_ZMMu16_ZMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_adds_epu16">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI16"/>
+ <parameter type="__m512i" varname="b" etype="UI16"/>
+ <description>Add packed unsigned 16-bit integers in "a" and "b" using saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := SaturateU16( a[i+15:i] + b[i+15:i] )
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPADDUSW" form="zmm {z}, zmm, zmm" xed="VPADDUSW_ZMMu16_MASKmskw_ZMMu16_ZMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_adds_epu16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__m128i" varname="src" etype="UI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <parameter type="__m128i" varname="b" etype="UI16"/>
+ <description>Add packed unsigned 16-bit integers in "a" and "b" using saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := SaturateU16( a[i+15:i] + b[i+15:i] )
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPADDUSW" form="xmm {k}, xmm, xmm" xed="VPADDUSW_XMMu16_MASKmskw_XMMu16_XMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_adds_epu16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <parameter type="__m128i" varname="b" etype="UI16"/>
+ <description>Add packed unsigned 16-bit integers in "a" and "b" using saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := SaturateU16( a[i+15:i] + b[i+15:i] )
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPADDUSW" form="xmm {z}, xmm, xmm" xed="VPADDUSW_XMMu16_MASKmskw_XMMu16_XMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_add_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__m256i" varname="src" etype="UI16"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI16"/>
+ <parameter type="__m256i" varname="b" etype="UI16"/>
+ <description>Add packed 16-bit integers in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := a[i+15:i] + b[i+15:i]
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPADDW" form="ymm {k}, ymm, ymm" xed="VPADDW_YMMu16_MASKmskw_YMMu16_YMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_add_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI16"/>
+ <parameter type="__m256i" varname="b" etype="UI16"/>
+ <description>Add packed 16-bit integers in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := a[i+15:i] + b[i+15:i]
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPADDW" form="ymm {z}, ymm, ymm" xed="VPADDW_YMMu16_MASKmskw_YMMu16_YMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_add_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__m512i" varname="a" etype="UI16"/>
+ <parameter type="__m512i" varname="b" etype="UI16"/>
+ <description>Add packed 16-bit integers in "a" and "b", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ dst[i+15:i] := a[i+15:i] + b[i+15:i]
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPADDW" form="zmm, zmm, zmm" xed="VPADDW_ZMMu16_MASKmskw_ZMMu16_ZMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_add_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__m512i" varname="src" etype="UI16"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI16"/>
+ <parameter type="__m512i" varname="b" etype="UI16"/>
+ <description>Add packed 16-bit integers in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := a[i+15:i] + b[i+15:i]
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPADDW" form="zmm {k}, zmm, zmm" xed="VPADDW_ZMMu16_MASKmskw_ZMMu16_ZMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_add_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI16"/>
+ <parameter type="__m512i" varname="b" etype="UI16"/>
+ <description>Add packed 16-bit integers in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := a[i+15:i] + b[i+15:i]
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPADDW" form="zmm {z}, zmm, zmm" xed="VPADDW_ZMMu16_MASKmskw_ZMMu16_ZMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_add_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__m128i" varname="src" etype="UI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <parameter type="__m128i" varname="b" etype="UI16"/>
+ <description>Add packed 16-bit integers in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := a[i+15:i] + b[i+15:i]
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPADDW" form="xmm {k}, xmm, xmm" xed="VPADDW_XMMu16_MASKmskw_XMMu16_XMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_add_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <parameter type="__m128i" varname="b" etype="UI16"/>
+ <description>Add packed 16-bit integers in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := a[i+15:i] + b[i+15:i]
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPADDW" form="xmm {z}, xmm, xmm" xed="VPADDW_XMMu16_MASKmskw_XMMu16_XMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_alignr_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256i" varname="dst" etype="UI8"/>
+ <parameter type="__m256i" varname="src" etype="UI8"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI8"/>
+ <parameter type="__m256i" varname="b" etype="UI8"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Concatenate pairs of 16-byte blocks in "a" and "b" into a 32-byte temporary result, shift the result right by "imm8" bytes, and store the low 16 bytes in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*128
+ tmp[255:0] := ((a[i+127:i] &lt;&lt; 128)[255:0] OR b[i+127:i]) &gt;&gt; (imm8*8)
+ tmp_dst[i+127:i] := tmp[127:0]
+ENDFOR
+FOR j := 0 to 31
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := tmp_dst[i+7:i]
+ ELSE
+ dst[i+7:i] := src[i+7:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPALIGNR" form="ymm {k}, ymm, ymm, imm8" xed="VPALIGNR_YMMu8_MASKmskw_YMMu8_YMMu8_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_alignr_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256i" varname="dst" etype="UI8"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI8"/>
+ <parameter type="__m256i" varname="b" etype="UI8"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Concatenate pairs of 16-byte blocks in "a" and "b" into a 32-byte temporary result, shift the result right by "imm8" bytes, and store the low 16 bytes in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*128
+ tmp[255:0] := ((a[i+127:i] &lt;&lt; 128)[255:0] OR b[i+127:i]) &gt;&gt; (imm8*8)
+ tmp_dst[i+127:i] := tmp[127:0]
+ENDFOR
+FOR j := 0 to 31
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := tmp_dst[i+7:i]
+ ELSE
+ dst[i+7:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPALIGNR" form="ymm {z}, ymm, ymm, imm8" xed="VPALIGNR_YMMu8_MASKmskw_YMMu8_YMMu8_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_alignr_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512i" varname="dst" etype="UI8"/>
+ <parameter type="__m512i" varname="a" etype="UI8"/>
+ <parameter type="__m512i" varname="b" etype="UI8"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="5"/>
+ <description>Concatenate pairs of 16-byte blocks in "a" and "b" into a 32-byte temporary result, shift the result right by "imm8" bytes, and store the low 16 bytes in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*128
+ tmp[255:0] := ((a[i+127:i] &lt;&lt; 128)[255:0] OR b[i+127:i]) &gt;&gt; (imm8*8)
+ dst[i+127:i] := tmp[127:0]
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPALIGNR" form="zmm, zmm, zmm, imm8" xed="VPALIGNR_ZMMu8_MASKmskw_ZMMu8_ZMMu8_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_alignr_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512i" varname="dst" etype="UI8"/>
+ <parameter type="__m512i" varname="src" etype="UI8"/>
+ <parameter type="__mmask64" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI8"/>
+ <parameter type="__m512i" varname="b" etype="UI8"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Concatenate pairs of 16-byte blocks in "a" and "b" into a 32-byte temporary result, shift the result right by "imm8" bytes, and store the low 16 bytes in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*128
+ tmp[255:0] := ((a[i+127:i] &lt;&lt; 128)[255:0] OR b[i+127:i]) &gt;&gt; (imm8*8)
+ tmp_dst[i+127:i] := tmp[127:0]
+ENDFOR
+FOR j := 0 to 63
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := tmp_dst[i+7:i]
+ ELSE
+ dst[i+7:i] := src[i+7:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPALIGNR" form="zmm {k}, zmm, zmm, imm8" xed="VPALIGNR_ZMMu8_MASKmskw_ZMMu8_ZMMu8_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_alignr_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512i" varname="dst" etype="UI8"/>
+ <parameter type="__mmask64" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI8"/>
+ <parameter type="__m512i" varname="b" etype="UI8"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Concatenate pairs of 16-byte blocks in "a" and "b" into a 32-byte temporary result, shift the result right by "imm8" bytes, and store the low 16 bytes in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*128
+ tmp[255:0] := ((a[i+127:i] &lt;&lt; 128)[255:0] OR b[i+127:i]) &gt;&gt; (imm8*8)
+ tmp_dst[i+127:i] := tmp[127:0]
+ENDFOR
+FOR j := 0 to 63
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := tmp_dst[i+7:i]
+ ELSE
+ dst[i+7:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPALIGNR" form="zmm {z}, zmm, zmm, imm8" xed="VPALIGNR_ZMMu8_MASKmskw_ZMMu8_ZMMu8_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_alignr_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__m128i" varname="src" etype="UI8"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
+ <parameter type="__m128i" varname="b" etype="UI8"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Concatenate pairs of 16-byte blocks in "a" and "b" into a 32-byte temporary result, shift the result right by "imm8" bytes, and store the low 16 bytes in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+tmp_dst[255:0] := ((a[127:0] &lt;&lt; 128)[255:0] OR b[127:0]) &gt;&gt; (imm8*8)
+FOR j := 0 to 15
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := tmp_dst[i+7:i]
+ ELSE
+ dst[i+7:i] := src[i+7:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPALIGNR" form="xmm {k}, xmm, xmm, imm8" xed="VPALIGNR_XMMu8_MASKmskw_XMMu8_XMMu8_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_alignr_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
+ <parameter type="__m128i" varname="b" etype="UI8"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Concatenate pairs of 16-byte blocks in "a" and "b" into a 32-byte temporary result, shift the result right by "imm8" bytes, and store the low 16 bytes in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+tmp_dst[255:0] := ((a[127:0] &lt;&lt; 128)[255:0] OR b[127:0]) &gt;&gt; (imm8*8)
+FOR j := 0 to 15
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := tmp_dst[i+7:i]
+ ELSE
+ dst[i+7:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPALIGNR" form="xmm {z}, xmm, xmm, imm8" xed="VPALIGNR_XMMu8_MASKmskw_XMMu8_XMMu8_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_avg_epu8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI8"/>
+ <parameter type="__m256i" varname="src" etype="UI8"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI8"/>
+ <parameter type="__m256i" varname="b" etype="UI8"/>
+ <description>Average packed unsigned 8-bit integers in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := (a[i+7:i] + b[i+7:i] + 1) &gt;&gt; 1
+ ELSE
+ dst[i+7:i] := src[i+7:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPAVGB" form="ymm {k}, ymm, ymm" xed="VPAVGB_YMMu8_MASKmskw_YMMu8_YMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_avg_epu8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI8"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI8"/>
+ <parameter type="__m256i" varname="b" etype="UI8"/>
+ <description>Average packed unsigned 8-bit integers in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := (a[i+7:i] + b[i+7:i] + 1) &gt;&gt; 1
+ ELSE
+ dst[i+7:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPAVGB" form="ymm {z}, ymm, ymm" xed="VPAVGB_YMMu8_MASKmskw_YMMu8_YMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_avg_epu8">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI8"/>
+ <parameter type="__m512i" varname="a" etype="UI8"/>
+ <parameter type="__m512i" varname="b" etype="UI8"/>
+ <description>Average packed unsigned 8-bit integers in "a" and "b", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 63
+ i := j*8
+ dst[i+7:i] := (a[i+7:i] + b[i+7:i] + 1) &gt;&gt; 1
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPAVGB" form="zmm, zmm, zmm" xed="VPAVGB_ZMMu8_MASKmskw_ZMMu8_ZMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_avg_epu8">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI8"/>
+ <parameter type="__m512i" varname="src" etype="UI8"/>
+ <parameter type="__mmask64" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI8"/>
+ <parameter type="__m512i" varname="b" etype="UI8"/>
+ <description>Average packed unsigned 8-bit integers in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 63
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := (a[i+7:i] + b[i+7:i] + 1) &gt;&gt; 1
+ ELSE
+ dst[i+7:i] := src[i+7:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPAVGB" form="zmm {k}, zmm, zmm" xed="VPAVGB_ZMMu8_MASKmskw_ZMMu8_ZMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_avg_epu8">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI8"/>
+ <parameter type="__mmask64" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI8"/>
+ <parameter type="__m512i" varname="b" etype="UI8"/>
+ <description>Average packed unsigned 8-bit integers in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 63
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := (a[i+7:i] + b[i+7:i] + 1) &gt;&gt; 1
+ ELSE
+ dst[i+7:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPAVGB" form="zmm {z}, zmm, zmm" xed="VPAVGB_ZMMu8_MASKmskw_ZMMu8_ZMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_avg_epu8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__m128i" varname="src" etype="UI8"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
+ <parameter type="__m128i" varname="b" etype="UI8"/>
+ <description>Average packed unsigned 8-bit integers in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := (a[i+7:i] + b[i+7:i] + 1) &gt;&gt; 1
+ ELSE
+ dst[i+7:i] := src[i+7:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPAVGB" form="xmm {k}, xmm, xmm" xed="VPAVGB_XMMu8_MASKmskw_XMMu8_XMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_avg_epu8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
+ <parameter type="__m128i" varname="b" etype="UI8"/>
+ <description>Average packed unsigned 8-bit integers in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := (a[i+7:i] + b[i+7:i] + 1) &gt;&gt; 1
+ ELSE
+ dst[i+7:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPAVGB" form="xmm {z}, xmm, xmm" xed="VPAVGB_XMMu8_MASKmskw_XMMu8_XMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_avg_epu16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__m256i" varname="src" etype="UI16"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI16"/>
+ <parameter type="__m256i" varname="b" etype="UI16"/>
+ <description>Average packed unsigned 16-bit integers in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := (a[i+15:i] + b[i+15:i] + 1) &gt;&gt; 1
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPAVGW" form="ymm {k}, ymm, ymm" xed="VPAVGW_YMMu16_MASKmskw_YMMu16_YMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_avg_epu16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI16"/>
+ <parameter type="__m256i" varname="b" etype="UI16"/>
+ <description>Average packed unsigned 16-bit integers in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := (a[i+15:i] + b[i+15:i] + 1) &gt;&gt; 1
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPAVGW" form="ymm {z}, ymm, ymm" xed="VPAVGW_YMMu16_MASKmskw_YMMu16_YMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_avg_epu16">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__m512i" varname="a" etype="UI16"/>
+ <parameter type="__m512i" varname="b" etype="UI16"/>
+ <description>Average packed unsigned 16-bit integers in "a" and "b", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ dst[i+15:i] := (a[i+15:i] + b[i+15:i] + 1) &gt;&gt; 1
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPAVGW" form="zmm, zmm, zmm" xed="VPAVGW_ZMMu16_MASKmskw_ZMMu16_ZMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_avg_epu16">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__m512i" varname="src" etype="UI16"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI16"/>
+ <parameter type="__m512i" varname="b" etype="UI16"/>
+ <description>Average packed unsigned 16-bit integers in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := (a[i+15:i] + b[i+15:i] + 1) &gt;&gt; 1
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPAVGW" form="zmm {k}, zmm, zmm" xed="VPAVGW_ZMMu16_MASKmskw_ZMMu16_ZMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_avg_epu16">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI16"/>
+ <parameter type="__m512i" varname="b" etype="UI16"/>
+ <description>Average packed unsigned 16-bit integers in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := (a[i+15:i] + b[i+15:i] + 1) &gt;&gt; 1
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPAVGW" form="zmm {z}, zmm, zmm" xed="VPAVGW_ZMMu16_MASKmskw_ZMMu16_ZMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_avg_epu16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__m128i" varname="src" etype="UI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <parameter type="__m128i" varname="b" etype="UI16"/>
+ <description>Average packed unsigned 16-bit integers in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := (a[i+15:i] + b[i+15:i] + 1) &gt;&gt; 1
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPAVGW" form="xmm {k}, xmm, xmm" xed="VPAVGW_XMMu16_MASKmskw_XMMu16_XMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_avg_epu16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <parameter type="__m128i" varname="b" etype="UI16"/>
+ <description>Average packed unsigned 16-bit integers in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := (a[i+15:i] + b[i+15:i] + 1) &gt;&gt; 1
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPAVGW" form="xmm {z}, xmm, xmm" xed="VPAVGW_XMMu16_MASKmskw_XMMu16_XMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_blend_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256i" varname="dst" etype="UI8"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI8"/>
+ <parameter type="__m256i" varname="b" etype="UI8"/>
+ <description>Blend packed 8-bit integers from "a" and "b" using control mask "k", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := b[i+7:i]
+ ELSE
+ dst[i+7:i] := a[i+7:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPBLENDMB" form="ymm {k}, ymm, ymm" xed="VPBLENDMB_YMMu8_MASKmskw_YMMu8_YMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_blend_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512i" varname="dst" etype="UI8"/>
+ <parameter type="__mmask64" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI8"/>
+ <parameter type="__m512i" varname="b" etype="UI8"/>
+ <description>Blend packed 8-bit integers from "a" and "b" using control mask "k", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 63
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := b[i+7:i]
+ ELSE
+ dst[i+7:i] := a[i+7:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPBLENDMB" form="zmm {k}, zmm, zmm" xed="VPBLENDMB_ZMMu8_MASKmskw_ZMMu8_ZMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_blend_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
+ <parameter type="__m128i" varname="b" etype="UI8"/>
+ <description>Blend packed 8-bit integers from "a" and "b" using control mask "k", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := b[i+7:i]
+ ELSE
+ dst[i+7:i] := a[i+7:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPBLENDMB" form="xmm {k}, xmm, xmm" xed="VPBLENDMB_XMMu8_MASKmskw_XMMu8_XMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_blend_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI16"/>
+ <parameter type="__m256i" varname="b" etype="UI16"/>
+ <description>Blend packed 16-bit integers from "a" and "b" using control mask "k", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := b[i+15:i]
+ ELSE
+ dst[i+15:i] := a[i+15:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPBLENDMW" form="ymm {k}, ymm, ymm" xed="VPBLENDMW_YMMu16_MASKmskw_YMMu16_YMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_blend_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI16"/>
+ <parameter type="__m512i" varname="b" etype="UI16"/>
+ <description>Blend packed 16-bit integers from "a" and "b" using control mask "k", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := b[i+15:i]
+ ELSE
+ dst[i+15:i] := a[i+15:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPBLENDMW" form="zmm {k}, zmm, zmm" xed="VPBLENDMW_ZMMu16_MASKmskw_ZMMu16_ZMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_blend_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <parameter type="__m128i" varname="b" etype="UI16"/>
+ <description>Blend packed 16-bit integers from "a" and "b" using control mask "k", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := b[i+15:i]
+ ELSE
+ dst[i+15:i] := a[i+15:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPBLENDMW" form="xmm {k}, xmm, xmm" xed="VPBLENDMW_XMMu16_MASKmskw_XMMu16_XMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_broadcastb_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256i" varname="dst" etype="UI8"/>
+ <parameter type="__m256i" varname="src" etype="UI8"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
+ <description>Broadcast the low packed 8-bit integer from "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := a[7:0]
+ ELSE
+ dst[i+7:i] := src[i+7:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPBROADCASTB" form="ymm {k}, xmm" xed="VPBROADCASTB_YMMu8_MASKmskw_XMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_set1_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Set</category>
+ <return type="__m256i" varname="dst" etype="UI8"/>
+ <parameter type="__m256i" varname="src" etype="UI8"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="char" varname="a" etype="UI8"/>
+ <description>Broadcast 8-bit integer "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := a[7:0]
+ ELSE
+ dst[i+7:i] := src[i+7:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPBROADCASTB" form="ymm {k}, r8" xed="VPBROADCASTB_YMMu8_MASKmskw_GPR32u8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_broadcastb_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256i" varname="dst" etype="UI8"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
+ <description>Broadcast the low packed 8-bit integer from "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := a[7:0]
+ ELSE
+ dst[i+7:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPBROADCASTB" form="ymm {z}, xmm" xed="VPBROADCASTB_YMMu8_MASKmskw_XMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_set1_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Set</category>
+ <return type="__m256i" varname="dst" etype="UI8"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="char" varname="a" etype="UI8"/>
+ <description>Broadcast 8-bit integer "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := a[7:0]
+ ELSE
+ dst[i+7:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPBROADCASTB" form="ymm {z}, r8" xed="VPBROADCASTB_YMMu8_MASKmskw_GPR32u8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_broadcastb_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512i" varname="dst" etype="UI8"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
+ <description>Broadcast the low packed 8-bit integer from "a" to all elements of "dst".</description>
+ <operation>
+FOR j := 0 to 63
+ i := j*8
+ dst[i+7:i] := a[7:0]
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPBROADCASTB" form="zmm, xmm" xed="VPBROADCASTB_ZMMu8_MASKmskw_XMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_broadcastb_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512i" varname="dst" etype="UI8"/>
+ <parameter type="__m512i" varname="src" etype="UI8"/>
+ <parameter type="__mmask64" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
+ <description>Broadcast the low packed 8-bit integer from "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 63
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := a[7:0]
+ ELSE
+ dst[i+7:i] := src[i+7:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPBROADCASTB" form="zmm {k}, xmm" xed="VPBROADCASTB_ZMMu8_MASKmskw_XMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_set1_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Set</category>
+ <return type="__m512i" varname="dst" etype="UI8"/>
+ <parameter type="__m512i" varname="src" etype="UI8"/>
+ <parameter type="__mmask64" varname="k" etype="MASK"/>
+ <parameter type="char" varname="a" etype="UI8"/>
+ <description>Broadcast 8-bit integer "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 63
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := a[7:0]
+ ELSE
+ dst[i+7:i] := src[i+7:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPBROADCASTB" form="zmm {k}, r8" xed="VPBROADCASTB_ZMMu8_MASKmskw_GPR32u8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_broadcastb_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512i" varname="dst" etype="UI8"/>
+ <parameter type="__mmask64" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
+ <description>Broadcast the low packed 8-bit integer from "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 63
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := a[7:0]
+ ELSE
+ dst[i+7:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPBROADCASTB" form="zmm {z}, xmm" xed="VPBROADCASTB_ZMMu8_MASKmskw_XMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_set1_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Set</category>
+ <return type="__m512i" varname="dst" etype="UI8"/>
+ <parameter type="__mmask64" varname="k" etype="MASK"/>
+ <parameter type="char" varname="a" etype="UI8"/>
+ <description>Broadcast 8-bit integer "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 63
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := a[7:0]
+ ELSE
+ dst[i+7:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPBROADCASTB" form="zmm {z}, r8" xed="VPBROADCASTB_ZMMu8_MASKmskw_GPR32u8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_broadcastb_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__m128i" varname="src" etype="UI8"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
+ <description>Broadcast the low packed 8-bit integer from "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := a[7:0]
+ ELSE
+ dst[i+7:i] := src[i+7:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPBROADCASTB" form="xmm {k}, xmm" xed="VPBROADCASTB_XMMu8_MASKmskw_XMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_set1_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Set</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__m128i" varname="src" etype="UI8"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="char" varname="a" etype="UI8"/>
+ <description>Broadcast 8-bit integer "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := a[7:0]
+ ELSE
+ dst[i+7:i] := src[i+7:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPBROADCASTB" form="xmm {k}, r8" xed="VPBROADCASTB_XMMu8_MASKmskw_GPR32u8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_broadcastb_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
+ <description>Broadcast the low packed 8-bit integer from "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := a[7:0]
+ ELSE
+ dst[i+7:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPBROADCASTB" form="xmm {z}, xmm" xed="VPBROADCASTB_XMMu8_MASKmskw_XMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_set1_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Set</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="char" varname="a" etype="UI8"/>
+ <description>Broadcast 8-bit integer "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := a[7:0]
+ ELSE
+ dst[i+7:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPBROADCASTB" form="xmm {z}, r8" xed="VPBROADCASTB_XMMu8_MASKmskw_GPR32u8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_broadcastw_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__m256i" varname="src" etype="UI16"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <description>Broadcast the low packed 16-bit integer from "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := a[15:0]
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPBROADCASTW" form="ymm {k}, xmm" xed="VPBROADCASTW_YMMu16_MASKmskw_XMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_set1_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Set</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__m256i" varname="src" etype="UI16"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="short" varname="a" etype="UI16"/>
+	<description>Broadcast 16-bit integer "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := a[15:0]
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPBROADCASTW" form="ymm {k}, r16" xed="VPBROADCASTW_YMMu16_MASKmskw_GPR32u16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_broadcastw_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <description>Broadcast the low packed 16-bit integer from "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := a[15:0]
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPBROADCASTW" form="ymm {z}, xmm" xed="VPBROADCASTW_YMMu16_MASKmskw_XMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_set1_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Set</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="short" varname="a" etype="UI16"/>
+ <description>Broadcast 16-bit integer "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := a[15:0]
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPBROADCASTW" form="ymm {z}, r16" xed="VPBROADCASTW_YMMu16_MASKmskw_GPR32u16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_broadcastw_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <description>Broadcast the low packed 16-bit integer from "a" to all elements of "dst".</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ dst[i+15:i] := a[15:0]
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPBROADCASTW" form="zmm, xmm" xed="VPBROADCASTW_ZMMu16_MASKmskw_XMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_broadcastw_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__m512i" varname="src" etype="UI16"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <description>Broadcast the low packed 16-bit integer from "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := a[15:0]
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPBROADCASTW" form="zmm {k}, xmm" xed="VPBROADCASTW_ZMMu16_MASKmskw_XMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_set1_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Set</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__m512i" varname="src" etype="UI16"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="short" varname="a" etype="UI16"/>
+ <description>Broadcast 16-bit integer "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := a[15:0]
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPBROADCASTW" form="zmm {k}, r16" xed="VPBROADCASTW_ZMMu16_MASKmskw_GPR32u16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_broadcastw_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <description>Broadcast the low packed 16-bit integer from "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := a[15:0]
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPBROADCASTW" form="zmm {z}, xmm" xed="VPBROADCASTW_ZMMu16_MASKmskw_XMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_set1_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Set</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="short" varname="a" etype="UI16"/>
+	<description>Broadcast 16-bit integer "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := a[15:0]
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPBROADCASTW" form="zmm {z}, r16" xed="VPBROADCASTW_ZMMu16_MASKmskw_GPR32u16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_broadcastw_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__m128i" varname="src" etype="UI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <description>Broadcast the low packed 16-bit integer from "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := a[15:0]
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPBROADCASTW" form="xmm {k}, xmm" xed="VPBROADCASTW_XMMu16_MASKmskw_XMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_set1_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Set</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__m128i" varname="src" etype="UI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="short" varname="a" etype="UI16"/>
+	<description>Broadcast 16-bit integer "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := a[15:0]
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPBROADCASTW" form="xmm {k}, r16" xed="VPBROADCASTW_XMMu16_MASKmskw_GPR32u16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_broadcastw_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <description>Broadcast the low packed 16-bit integer from "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := a[15:0]
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPBROADCASTW" form="xmm {z}, xmm" xed="VPBROADCASTW_XMMu16_MASKmskw_XMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_set1_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Set</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="short" varname="a" etype="UI16"/>
+	<description>Broadcast 16-bit integer "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := a[15:0]
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPBROADCASTW" form="xmm {z}, r16" xed="VPBROADCASTW_XMMu16_MASKmskw_GPR32u16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_cmp_epi8_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI8"/>
+ <parameter type="__m256i" varname="b" etype="SI8"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immtype="_MM_CMPINT"/>
+ <description>Compare packed signed 8-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k".</description>
+ <operation>CASE (imm8[2:0]) OF
+0: OP := _MM_CMPINT_EQ
+1: OP := _MM_CMPINT_LT
+2: OP := _MM_CMPINT_LE
+3: OP := _MM_CMPINT_FALSE
+4: OP := _MM_CMPINT_NE
+5: OP := _MM_CMPINT_NLT
+6: OP := _MM_CMPINT_NLE
+7: OP := _MM_CMPINT_TRUE
+ESAC
+FOR j := 0 to 31
+ i := j*8
+ k[j] := ( a[i+7:i] OP b[i+7:i] ) ? 1 : 0
+ENDFOR
+k[MAX:32] := 0
+ </operation>
+ <instruction name="VPCMPB" form="k, ymm, ymm, imm8" xed="VPCMPB_MASKmskw_MASKmskw_YMMi8_YMMi8_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_cmpeq_epi8_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI8"/>
+ <parameter type="__m256i" varname="b" etype="SI8"/>
+ <description>Compare packed signed 8-bit integers in "a" and "b" for equality, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*8
+ k[j] := ( a[i+7:i] == b[i+7:i] ) ? 1 : 0
+ENDFOR
+k[MAX:32] := 0
+ </operation>
+ <instruction name="VPCMPB" form="k, ymm, ymm" xed="VPCMPB_MASKmskw_MASKmskw_YMMi8_YMMi8_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_cmpge_epi8_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI8"/>
+ <parameter type="__m256i" varname="b" etype="SI8"/>
+ <description>Compare packed signed 8-bit integers in "a" and "b" for greater-than-or-equal, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*8
+ k[j] := ( a[i+7:i] &gt;= b[i+7:i] ) ? 1 : 0
+ENDFOR
+k[MAX:32] := 0
+ </operation>
+ <instruction name="VPCMPB" form="k, ymm, ymm" xed="VPCMPB_MASKmskw_MASKmskw_YMMi8_YMMi8_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_cmpgt_epi8_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI8"/>
+ <parameter type="__m256i" varname="b" etype="SI8"/>
+ <description>Compare packed signed 8-bit integers in "a" and "b" for greater-than, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*8
+ k[j] := ( a[i+7:i] &gt; b[i+7:i] ) ? 1 : 0
+ENDFOR
+k[MAX:32] := 0
+ </operation>
+ <instruction name="VPCMPB" form="k, ymm, ymm" xed="VPCMPB_MASKmskw_MASKmskw_YMMi8_YMMi8_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_cmple_epi8_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI8"/>
+ <parameter type="__m256i" varname="b" etype="SI8"/>
+ <description>Compare packed signed 8-bit integers in "a" and "b" for less-than-or-equal, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*8
+ k[j] := ( a[i+7:i] &lt;= b[i+7:i] ) ? 1 : 0
+ENDFOR
+k[MAX:32] := 0
+ </operation>
+ <instruction name="VPCMPB" form="k, ymm, ymm" xed="VPCMPB_MASKmskw_MASKmskw_YMMi8_YMMi8_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_cmplt_epi8_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI8"/>
+ <parameter type="__m256i" varname="b" etype="SI8"/>
+ <description>Compare packed signed 8-bit integers in "a" and "b" for less-than, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*8
+ k[j] := ( a[i+7:i] &lt; b[i+7:i] ) ? 1 : 0
+ENDFOR
+k[MAX:32] := 0
+ </operation>
+ <instruction name="VPCMPB" form="k, ymm, ymm" xed="VPCMPB_MASKmskw_MASKmskw_YMMi8_YMMi8_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_cmpneq_epi8_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI8"/>
+ <parameter type="__m256i" varname="b" etype="SI8"/>
+ <description>Compare packed signed 8-bit integers in "a" and "b" for not-equal, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*8
+ k[j] := ( a[i+7:i] != b[i+7:i] ) ? 1 : 0
+ENDFOR
+k[MAX:32] := 0
+ </operation>
+ <instruction name="VPCMPB" form="k, ymm, ymm" xed="VPCMPB_MASKmskw_MASKmskw_YMMi8_YMMi8_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cmp_epi8_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__mmask32" varname="k1" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI8"/>
+ <parameter type="__m256i" varname="b" etype="SI8"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immtype="_MM_CMPINT"/>
+ <description>Compare packed signed 8-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>CASE (imm8[2:0]) OF
+0: OP := _MM_CMPINT_EQ
+1: OP := _MM_CMPINT_LT
+2: OP := _MM_CMPINT_LE
+3: OP := _MM_CMPINT_FALSE
+4: OP := _MM_CMPINT_NE
+5: OP := _MM_CMPINT_NLT
+6: OP := _MM_CMPINT_NLE
+7: OP := _MM_CMPINT_TRUE
+ESAC
+FOR j := 0 to 31
+ i := j*8
+ IF k1[j]
+ k[j] := ( a[i+7:i] OP b[i+7:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:32] := 0
+ </operation>
+ <instruction name="VPCMPB" form="k {k}, ymm, ymm, imm8" xed="VPCMPB_MASKmskw_MASKmskw_YMMi8_YMMi8_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cmpeq_epi8_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__mmask32" varname="k1" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI8"/>
+ <parameter type="__m256i" varname="b" etype="SI8"/>
+ <description>Compare packed signed 8-bit integers in "a" and "b" for equality, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*8
+ IF k1[j]
+ k[j] := ( a[i+7:i] == b[i+7:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:32] := 0
+ </operation>
+ <instruction name="VPCMPB" form="k {k}, ymm, ymm" xed="VPCMPB_MASKmskw_MASKmskw_YMMi8_YMMi8_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cmpge_epi8_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__mmask32" varname="k1" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI8"/>
+ <parameter type="__m256i" varname="b" etype="SI8"/>
+ <description>Compare packed signed 8-bit integers in "a" and "b" for greater-than-or-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*8
+ IF k1[j]
+ k[j] := ( a[i+7:i] &gt;= b[i+7:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:32] := 0
+ </operation>
+ <instruction name="VPCMPB" form="k {k}, ymm, ymm" xed="VPCMPB_MASKmskw_MASKmskw_YMMi8_YMMi8_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cmpgt_epi8_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__mmask32" varname="k1" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI8"/>
+ <parameter type="__m256i" varname="b" etype="SI8"/>
+ <description>Compare packed signed 8-bit integers in "a" and "b" for greater-than, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*8
+ IF k1[j]
+ k[j] := ( a[i+7:i] &gt; b[i+7:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:32] := 0
+ </operation>
+ <instruction name="VPCMPB" form="k {k}, ymm, ymm" xed="VPCMPB_MASKmskw_MASKmskw_YMMi8_YMMi8_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cmple_epi8_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__mmask32" varname="k1" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI8"/>
+ <parameter type="__m256i" varname="b" etype="SI8"/>
+ <description>Compare packed signed 8-bit integers in "a" and "b" for less-than-or-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*8
+ IF k1[j]
+ k[j] := ( a[i+7:i] &lt;= b[i+7:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:32] := 0
+ </operation>
+ <instruction name="VPCMPB" form="k {k}, ymm, ymm" xed="VPCMPB_MASKmskw_MASKmskw_YMMi8_YMMi8_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cmplt_epi8_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__mmask32" varname="k1" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI8"/>
+ <parameter type="__m256i" varname="b" etype="SI8"/>
+ <description>Compare packed signed 8-bit integers in "a" and "b" for less-than, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*8
+ IF k1[j]
+ k[j] := ( a[i+7:i] &lt; b[i+7:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:32] := 0
+ </operation>
+ <instruction name="VPCMPB" form="k {k}, ymm, ymm" xed="VPCMPB_MASKmskw_MASKmskw_YMMi8_YMMi8_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cmpneq_epi8_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__mmask32" varname="k1" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI8"/>
+ <parameter type="__m256i" varname="b" etype="SI8"/>
+ <description>Compare packed signed 8-bit integers in "a" and "b" for not-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*8
+ IF k1[j]
+ k[j] := ( a[i+7:i] != b[i+7:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:32] := 0
+ </operation>
+ <instruction name="VPCMPB" form="k {k}, ymm, ymm" xed="VPCMPB_MASKmskw_MASKmskw_YMMi8_YMMi8_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_cmp_epi8_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask64" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI8"/>
+ <parameter type="__m512i" varname="b" etype="SI8"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immtype="_MM_CMPINT"/>
+ <description>Compare packed signed 8-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k".</description>
+ <operation>CASE (imm8[2:0]) OF
+0: OP := _MM_CMPINT_EQ
+1: OP := _MM_CMPINT_LT
+2: OP := _MM_CMPINT_LE
+3: OP := _MM_CMPINT_FALSE
+4: OP := _MM_CMPINT_NE
+5: OP := _MM_CMPINT_NLT
+6: OP := _MM_CMPINT_NLE
+7: OP := _MM_CMPINT_TRUE
+ESAC
+FOR j := 0 to 63
+ i := j*8
+ k[j] := ( a[i+7:i] OP b[i+7:i] ) ? 1 : 0
+ENDFOR
+k[MAX:64] := 0
+ </operation>
+ <instruction name="VPCMPB" form="k, zmm, zmm, imm8" xed="VPCMPB_MASKmskw_MASKmskw_ZMMi8_ZMMi8_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_cmpeq_epi8_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask64" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI8"/>
+ <parameter type="__m512i" varname="b" etype="SI8"/>
+ <description>Compare packed signed 8-bit integers in "a" and "b" for equality, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 63
+ i := j*8
+ k[j] := ( a[i+7:i] == b[i+7:i] ) ? 1 : 0
+ENDFOR
+k[MAX:64] := 0
+ </operation>
+ <instruction name="VPCMPB" form="k, zmm, zmm" xed="VPCMPB_MASKmskw_MASKmskw_ZMMi8_ZMMi8_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_cmpge_epi8_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask64" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI8"/>
+ <parameter type="__m512i" varname="b" etype="SI8"/>
+ <description>Compare packed signed 8-bit integers in "a" and "b" for greater-than-or-equal, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 63
+ i := j*8
+ k[j] := ( a[i+7:i] &gt;= b[i+7:i] ) ? 1 : 0
+ENDFOR
+k[MAX:64] := 0
+ </operation>
+ <instruction name="VPCMPB" form="k, zmm, zmm" xed="VPCMPB_MASKmskw_MASKmskw_ZMMi8_ZMMi8_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_cmpgt_epi8_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask64" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI8"/>
+ <parameter type="__m512i" varname="b" etype="SI8"/>
+ <description>Compare packed signed 8-bit integers in "a" and "b" for greater-than, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 63
+ i := j*8
+ k[j] := ( a[i+7:i] &gt; b[i+7:i] ) ? 1 : 0
+ENDFOR
+k[MAX:64] := 0
+ </operation>
+ <instruction name="VPCMPB" form="k, zmm, zmm" xed="VPCMPB_MASKmskw_MASKmskw_ZMMi8_ZMMi8_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_cmple_epi8_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask64" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI8"/>
+ <parameter type="__m512i" varname="b" etype="SI8"/>
+ <description>Compare packed signed 8-bit integers in "a" and "b" for less-than-or-equal, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 63
+ i := j*8
+ k[j] := ( a[i+7:i] &lt;= b[i+7:i] ) ? 1 : 0
+ENDFOR
+k[MAX:64] := 0
+ </operation>
+ <instruction name="VPCMPB" form="k, zmm, zmm" xed="VPCMPB_MASKmskw_MASKmskw_ZMMi8_ZMMi8_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_cmplt_epi8_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask64" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI8"/>
+ <parameter type="__m512i" varname="b" etype="SI8"/>
+ <description>Compare packed signed 8-bit integers in "a" and "b" for less-than, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 63
+ i := j*8
+ k[j] := ( a[i+7:i] &lt; b[i+7:i] ) ? 1 : 0
+ENDFOR
+k[MAX:64] := 0
+ </operation>
+ <instruction name="VPCMPB" form="k, zmm, zmm" xed="VPCMPB_MASKmskw_MASKmskw_ZMMi8_ZMMi8_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_cmpneq_epi8_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask64" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI8"/>
+ <parameter type="__m512i" varname="b" etype="SI8"/>
+ <description>Compare packed signed 8-bit integers in "a" and "b" for not-equal, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 63
+ i := j*8
+ k[j] := ( a[i+7:i] != b[i+7:i] ) ? 1 : 0
+ENDFOR
+k[MAX:64] := 0
+ </operation>
+ <instruction name="VPCMPB" form="k, zmm, zmm" xed="VPCMPB_MASKmskw_MASKmskw_ZMMi8_ZMMi8_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cmp_epi8_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask64" varname="k" etype="MASK"/>
+ <parameter type="__mmask64" varname="k1" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI8"/>
+ <parameter type="__m512i" varname="b" etype="SI8"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immtype="_MM_CMPINT"/>
+ <description>Compare packed signed 8-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>CASE (imm8[2:0]) OF
+0: OP := _MM_CMPINT_EQ
+1: OP := _MM_CMPINT_LT
+2: OP := _MM_CMPINT_LE
+3: OP := _MM_CMPINT_FALSE
+4: OP := _MM_CMPINT_NE
+5: OP := _MM_CMPINT_NLT
+6: OP := _MM_CMPINT_NLE
+7: OP := _MM_CMPINT_TRUE
+ESAC
+FOR j := 0 to 63
+ i := j*8
+ IF k1[j]
+ k[j] := ( a[i+7:i] OP b[i+7:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:64] := 0
+ </operation>
+ <instruction name="VPCMPB" form="k {k}, zmm, zmm, imm8" xed="VPCMPB_MASKmskw_MASKmskw_ZMMi8_ZMMi8_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cmpeq_epi8_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask64" varname="k" etype="MASK"/>
+ <parameter type="__mmask64" varname="k1" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI8"/>
+ <parameter type="__m512i" varname="b" etype="SI8"/>
+ <description>Compare packed signed 8-bit integers in "a" and "b" for equality, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 63
+ i := j*8
+ IF k1[j]
+ k[j] := ( a[i+7:i] == b[i+7:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:64] := 0
+ </operation>
+ <instruction name="VPCMPB" form="k {k}, zmm, zmm" xed="VPCMPB_MASKmskw_MASKmskw_ZMMi8_ZMMi8_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cmpge_epi8_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask64" varname="k" etype="MASK"/>
+ <parameter type="__mmask64" varname="k1" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI8"/>
+ <parameter type="__m512i" varname="b" etype="SI8"/>
+ <description>Compare packed signed 8-bit integers in "a" and "b" for greater-than-or-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 63
+ i := j*8
+ IF k1[j]
+ k[j] := ( a[i+7:i] &gt;= b[i+7:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:64] := 0
+ </operation>
+ <instruction name="VPCMPB" form="k {k}, zmm, zmm" xed="VPCMPB_MASKmskw_MASKmskw_ZMMi8_ZMMi8_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cmpgt_epi8_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask64" varname="k" etype="MASK"/>
+ <parameter type="__mmask64" varname="k1" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI8"/>
+ <parameter type="__m512i" varname="b" etype="SI8"/>
+ <description>Compare packed signed 8-bit integers in "a" and "b" for greater-than, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 63
+ i := j*8
+ IF k1[j]
+ k[j] := ( a[i+7:i] &gt; b[i+7:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:64] := 0
+ </operation>
+ <instruction name="VPCMPB" form="k {k}, zmm, zmm" xed="VPCMPB_MASKmskw_MASKmskw_ZMMi8_ZMMi8_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cmple_epi8_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask64" varname="k" etype="MASK"/>
+ <parameter type="__mmask64" varname="k1" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI8"/>
+ <parameter type="__m512i" varname="b" etype="SI8"/>
+ <description>Compare packed signed 8-bit integers in "a" and "b" for less-than-or-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 63
+ i := j*8
+ IF k1[j]
+ k[j] := ( a[i+7:i] &lt;= b[i+7:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:64] := 0
+ </operation>
+ <instruction name="VPCMPB" form="k {k}, zmm, zmm" xed="VPCMPB_MASKmskw_MASKmskw_ZMMi8_ZMMi8_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cmplt_epi8_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask64" varname="k" etype="MASK"/>
+ <parameter type="__mmask64" varname="k1" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI8"/>
+ <parameter type="__m512i" varname="b" etype="SI8"/>
+ <description>Compare packed signed 8-bit integers in "a" and "b" for less-than, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 63
+ i := j*8
+ IF k1[j]
+ k[j] := ( a[i+7:i] &lt; b[i+7:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:64] := 0
+ </operation>
+ <instruction name="VPCMPB" form="k {k}, zmm, zmm" xed="VPCMPB_MASKmskw_MASKmskw_ZMMi8_ZMMi8_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cmpneq_epi8_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask64" varname="k" etype="MASK"/>
+ <parameter type="__mmask64" varname="k1" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI8"/>
+ <parameter type="__m512i" varname="b" etype="SI8"/>
+ <description>Compare packed signed 8-bit integers in "a" and "b" for not-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 63
+ i := j*8
+ IF k1[j]
+ k[j] := ( a[i+7:i] != b[i+7:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:64] := 0
+ </operation>
+ <instruction name="VPCMPB" form="k {k}, zmm, zmm" xed="VPCMPB_MASKmskw_MASKmskw_ZMMi8_ZMMi8_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cmp_epi8_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI8"/>
+ <parameter type="__m128i" varname="b" etype="SI8"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immtype="_MM_CMPINT"/>
+ <description>Compare packed signed 8-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k".</description>
+ <operation>CASE (imm8[2:0]) OF
+0: OP := _MM_CMPINT_EQ
+1: OP := _MM_CMPINT_LT
+2: OP := _MM_CMPINT_LE
+3: OP := _MM_CMPINT_FALSE
+4: OP := _MM_CMPINT_NE
+5: OP := _MM_CMPINT_NLT
+6: OP := _MM_CMPINT_NLE
+7: OP := _MM_CMPINT_TRUE
+ESAC
+FOR j := 0 to 15
+ i := j*8
+ k[j] := ( a[i+7:i] OP b[i+7:i] ) ? 1 : 0
+ENDFOR
+k[MAX:16] := 0
+ </operation>
+ <instruction name="VPCMPB" form="k, xmm, xmm, imm8" xed="VPCMPB_MASKmskw_MASKmskw_XMMi8_XMMi8_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cmpeq_epi8_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI8"/>
+ <parameter type="__m128i" varname="b" etype="SI8"/>
+ <description>Compare packed signed 8-bit integers in "a" and "b" for equality, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*8
+ k[j] := ( a[i+7:i] == b[i+7:i] ) ? 1 : 0
+ENDFOR
+k[MAX:16] := 0
+ </operation>
+ <instruction name="VPCMPB" form="k, xmm, xmm" xed="VPCMPB_MASKmskw_MASKmskw_XMMi8_XMMi8_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cmpge_epi8_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI8"/>
+ <parameter type="__m128i" varname="b" etype="SI8"/>
+ <description>Compare packed signed 8-bit integers in "a" and "b" for greater-than-or-equal, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*8
+ k[j] := ( a[i+7:i] &gt;= b[i+7:i] ) ? 1 : 0
+ENDFOR
+k[MAX:16] := 0
+ </operation>
+ <instruction name="VPCMPB" form="k, xmm, xmm" xed="VPCMPB_MASKmskw_MASKmskw_XMMi8_XMMi8_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cmpgt_epi8_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI8"/>
+ <parameter type="__m128i" varname="b" etype="SI8"/>
+ <description>Compare packed signed 8-bit integers in "a" and "b" for greater-than, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*8
+ k[j] := ( a[i+7:i] &gt; b[i+7:i] ) ? 1 : 0
+ENDFOR
+k[MAX:16] := 0
+ </operation>
+ <instruction name="VPCMPB" form="k, xmm, xmm" xed="VPCMPB_MASKmskw_MASKmskw_XMMi8_XMMi8_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cmple_epi8_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI8"/>
+ <parameter type="__m128i" varname="b" etype="SI8"/>
+ <description>Compare packed signed 8-bit integers in "a" and "b" for less-than-or-equal, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*8
+ k[j] := ( a[i+7:i] &lt;= b[i+7:i] ) ? 1 : 0
+ENDFOR
+k[MAX:16] := 0
+ </operation>
+ <instruction name="VPCMPB" form="k, xmm, xmm" xed="VPCMPB_MASKmskw_MASKmskw_XMMi8_XMMi8_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cmplt_epi8_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI8"/>
+ <parameter type="__m128i" varname="b" etype="SI8"/>
+ <description>Compare packed signed 8-bit integers in "a" and "b" for less-than, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*8
+ k[j] := ( a[i+7:i] &lt; b[i+7:i] ) ? 1 : 0
+ENDFOR
+k[MAX:16] := 0
+ </operation>
+ <instruction name="VPCMPB" form="k, xmm, xmm" xed="VPCMPB_MASKmskw_MASKmskw_XMMi8_XMMi8_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cmpneq_epi8_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI8"/>
+ <parameter type="__m128i" varname="b" etype="SI8"/>
+ <description>Compare packed signed 8-bit integers in "a" and "b" for not-equal, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*8
+ k[j] := ( a[i+7:i] != b[i+7:i] ) ? 1 : 0
+ENDFOR
+k[MAX:16] := 0
+ </operation>
+ <instruction name="VPCMPB" form="k, xmm, xmm" xed="VPCMPB_MASKmskw_MASKmskw_XMMi8_XMMi8_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cmp_epi8_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__mmask16" varname="k1" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI8"/>
+ <parameter type="__m128i" varname="b" etype="SI8"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immtype="_MM_CMPINT"/>
+ <description>Compare packed signed 8-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>CASE (imm8[2:0]) OF
+0: OP := _MM_CMPINT_EQ
+1: OP := _MM_CMPINT_LT
+2: OP := _MM_CMPINT_LE
+3: OP := _MM_CMPINT_FALSE
+4: OP := _MM_CMPINT_NE
+5: OP := _MM_CMPINT_NLT
+6: OP := _MM_CMPINT_NLE
+7: OP := _MM_CMPINT_TRUE
+ESAC
+FOR j := 0 to 15
+ i := j*8
+ IF k1[j]
+ k[j] := ( a[i+7:i] OP b[i+7:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:16] := 0
+ </operation>
+ <instruction name="VPCMPB" form="k {k}, xmm, xmm, imm8" xed="VPCMPB_MASKmskw_MASKmskw_XMMi8_XMMi8_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cmpeq_epi8_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__mmask16" varname="k1" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI8"/>
+ <parameter type="__m128i" varname="b" etype="SI8"/>
+ <description>Compare packed signed 8-bit integers in "a" and "b" for equality, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*8
+ IF k1[j]
+ k[j] := ( a[i+7:i] == b[i+7:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:16] := 0
+ </operation>
+ <instruction name="VPCMPB" form="k {k}, xmm, xmm" xed="VPCMPB_MASKmskw_MASKmskw_XMMi8_XMMi8_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cmpge_epi8_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__mmask16" varname="k1" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI8"/>
+ <parameter type="__m128i" varname="b" etype="SI8"/>
+ <description>Compare packed signed 8-bit integers in "a" and "b" for greater-than-or-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*8
+ IF k1[j]
+ k[j] := ( a[i+7:i] &gt;= b[i+7:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:16] := 0
+ </operation>
+ <instruction name="VPCMPB" form="k {k}, xmm, xmm" xed="VPCMPB_MASKmskw_MASKmskw_XMMi8_XMMi8_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cmpgt_epi8_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__mmask16" varname="k1" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI8"/>
+ <parameter type="__m128i" varname="b" etype="SI8"/>
+ <description>Compare packed signed 8-bit integers in "a" and "b" for greater-than, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*8
+ IF k1[j]
+ k[j] := ( a[i+7:i] &gt; b[i+7:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:16] := 0
+ </operation>
+ <instruction name="VPCMPB" form="k {k}, xmm, xmm" xed="VPCMPB_MASKmskw_MASKmskw_XMMi8_XMMi8_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cmple_epi8_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__mmask16" varname="k1" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI8"/>
+ <parameter type="__m128i" varname="b" etype="SI8"/>
+ <description>Compare packed signed 8-bit integers in "a" and "b" for less-than-or-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*8
+ IF k1[j]
+ k[j] := ( a[i+7:i] &lt;= b[i+7:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:16] := 0
+ </operation>
+ <instruction name="VPCMPB" form="k {k}, xmm, xmm" xed="VPCMPB_MASKmskw_MASKmskw_XMMi8_XMMi8_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cmplt_epi8_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__mmask16" varname="k1" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI8"/>
+ <parameter type="__m128i" varname="b" etype="SI8"/>
+ <description>Compare packed signed 8-bit integers in "a" and "b" for less-than, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*8
+ IF k1[j]
+ k[j] := ( a[i+7:i] &lt; b[i+7:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:16] := 0
+ </operation>
+ <instruction name="VPCMPB" form="k {k}, xmm, xmm" xed="VPCMPB_MASKmskw_MASKmskw_XMMi8_XMMi8_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cmpneq_epi8_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__mmask16" varname="k1" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI8"/>
+ <parameter type="__m128i" varname="b" etype="SI8"/>
+ <description>Compare packed signed 8-bit integers in "a" and "b" for not-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*8
+ IF k1[j]
+ k[j] := ( a[i+7:i] != b[i+7:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:16] := 0
+ </operation>
+ <instruction name="VPCMPB" form="k {k}, xmm, xmm" xed="VPCMPB_MASKmskw_MASKmskw_XMMi8_XMMi8_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_cmp_epu8_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI8"/>
+ <parameter type="__m256i" varname="b" etype="UI8"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immtype="_MM_CMPINT"/>
+ <description>Compare packed unsigned 8-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k".</description>
+ <operation>CASE (imm8[2:0]) OF
+0: OP := _MM_CMPINT_EQ
+1: OP := _MM_CMPINT_LT
+2: OP := _MM_CMPINT_LE
+3: OP := _MM_CMPINT_FALSE
+4: OP := _MM_CMPINT_NE
+5: OP := _MM_CMPINT_NLT
+6: OP := _MM_CMPINT_NLE
+7: OP := _MM_CMPINT_TRUE
+ESAC
+FOR j := 0 to 31
+ i := j*8
+ k[j] := ( a[i+7:i] OP b[i+7:i] ) ? 1 : 0
+ENDFOR
+k[MAX:32] := 0
+ </operation>
+ <instruction name="VPCMPUB" form="k, ymm, ymm, imm8" xed="VPCMPUB_MASKmskw_MASKmskw_YMMu8_YMMu8_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_cmpeq_epu8_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI8"/>
+ <parameter type="__m256i" varname="b" etype="UI8"/>
+ <description>Compare packed unsigned 8-bit integers in "a" and "b" for equality, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*8
+ k[j] := ( a[i+7:i] == b[i+7:i] ) ? 1 : 0
+ENDFOR
+k[MAX:32] := 0
+ </operation>
+ <instruction name="VPCMPUB" form="k, ymm, ymm" xed="VPCMPUB_MASKmskw_MASKmskw_YMMu8_YMMu8_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_cmpge_epu8_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI8"/>
+ <parameter type="__m256i" varname="b" etype="UI8"/>
+ <description>Compare packed unsigned 8-bit integers in "a" and "b" for greater-than-or-equal, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*8
+ k[j] := ( a[i+7:i] &gt;= b[i+7:i] ) ? 1 : 0
+ENDFOR
+k[MAX:32] := 0
+ </operation>
+ <instruction name="VPCMPUB" form="k, ymm, ymm" xed="VPCMPUB_MASKmskw_MASKmskw_YMMu8_YMMu8_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_cmpgt_epu8_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI8"/>
+ <parameter type="__m256i" varname="b" etype="UI8"/>
+ <description>Compare packed unsigned 8-bit integers in "a" and "b" for greater-than, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*8
+ k[j] := ( a[i+7:i] &gt; b[i+7:i] ) ? 1 : 0
+ENDFOR
+k[MAX:32] := 0
+ </operation>
+ <instruction name="VPCMPUB" form="k, ymm, ymm" xed="VPCMPUB_MASKmskw_MASKmskw_YMMu8_YMMu8_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_cmple_epu8_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI8"/>
+ <parameter type="__m256i" varname="b" etype="UI8"/>
+ <description>Compare packed unsigned 8-bit integers in "a" and "b" for less-than-or-equal, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*8
+ k[j] := ( a[i+7:i] &lt;= b[i+7:i] ) ? 1 : 0
+ENDFOR
+k[MAX:32] := 0
+ </operation>
+ <instruction name="VPCMPUB" form="k, ymm, ymm" xed="VPCMPUB_MASKmskw_MASKmskw_YMMu8_YMMu8_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_cmplt_epu8_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI8"/>
+ <parameter type="__m256i" varname="b" etype="UI8"/>
+ <description>Compare packed unsigned 8-bit integers in "a" and "b" for less-than, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*8
+ k[j] := ( a[i+7:i] &lt; b[i+7:i] ) ? 1 : 0
+ENDFOR
+k[MAX:32] := 0
+ </operation>
+ <instruction name="VPCMPUB" form="k, ymm, ymm" xed="VPCMPUB_MASKmskw_MASKmskw_YMMu8_YMMu8_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_cmpneq_epu8_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI8"/>
+ <parameter type="__m256i" varname="b" etype="UI8"/>
+ <description>Compare packed unsigned 8-bit integers in "a" and "b" for not-equal, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*8
+ k[j] := ( a[i+7:i] != b[i+7:i] ) ? 1 : 0
+ENDFOR
+k[MAX:32] := 0
+ </operation>
+ <instruction name="VPCMPUB" form="k, ymm, ymm" xed="VPCMPUB_MASKmskw_MASKmskw_YMMu8_YMMu8_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cmp_epu8_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__mmask32" varname="k1" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI8"/>
+ <parameter type="__m256i" varname="b" etype="UI8"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immtype="_MM_CMPINT"/>
+ <description>Compare packed unsigned 8-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>CASE (imm8[2:0]) OF
+0: OP := _MM_CMPINT_EQ
+1: OP := _MM_CMPINT_LT
+2: OP := _MM_CMPINT_LE
+3: OP := _MM_CMPINT_FALSE
+4: OP := _MM_CMPINT_NE
+5: OP := _MM_CMPINT_NLT
+6: OP := _MM_CMPINT_NLE
+7: OP := _MM_CMPINT_TRUE
+ESAC
+FOR j := 0 to 31
+ i := j*8
+ IF k1[j]
+ k[j] := ( a[i+7:i] OP b[i+7:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:32] := 0
+ </operation>
+ <instruction name="VPCMPUB" form="k {k}, ymm, ymm, imm8" xed="VPCMPUB_MASKmskw_MASKmskw_YMMu8_YMMu8_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cmpeq_epu8_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__mmask32" varname="k1" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI8"/>
+ <parameter type="__m256i" varname="b" etype="UI8"/>
+ <description>Compare packed unsigned 8-bit integers in "a" and "b" for equality, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*8
+ IF k1[j]
+ k[j] := ( a[i+7:i] == b[i+7:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:32] := 0
+ </operation>
+ <instruction name="VPCMPUB" form="k {k}, ymm, ymm" xed="VPCMPUB_MASKmskw_MASKmskw_YMMu8_YMMu8_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cmpge_epu8_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__mmask32" varname="k1" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI8"/>
+ <parameter type="__m256i" varname="b" etype="UI8"/>
+ <description>Compare packed unsigned 8-bit integers in "a" and "b" for greater-than-or-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*8
+ IF k1[j]
+ k[j] := ( a[i+7:i] &gt;= b[i+7:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:32] := 0
+ </operation>
+ <instruction name="VPCMPUB" form="k {k}, ymm, ymm" xed="VPCMPUB_MASKmskw_MASKmskw_YMMu8_YMMu8_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cmpgt_epu8_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__mmask32" varname="k1" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI8"/>
+ <parameter type="__m256i" varname="b" etype="UI8"/>
+ <description>Compare packed unsigned 8-bit integers in "a" and "b" for greater-than, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*8
+ IF k1[j]
+ k[j] := ( a[i+7:i] &gt; b[i+7:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:32] := 0
+ </operation>
+ <instruction name="VPCMPUB" form="k {k}, ymm, ymm" xed="VPCMPUB_MASKmskw_MASKmskw_YMMu8_YMMu8_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cmple_epu8_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__mmask32" varname="k1" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI8"/>
+ <parameter type="__m256i" varname="b" etype="UI8"/>
+ <description>Compare packed unsigned 8-bit integers in "a" and "b" for less-than-or-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*8
+ IF k1[j]
+ k[j] := ( a[i+7:i] &lt;= b[i+7:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:32] := 0
+ </operation>
+ <instruction name="VPCMPUB" form="k {k}, ymm, ymm" xed="VPCMPUB_MASKmskw_MASKmskw_YMMu8_YMMu8_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cmplt_epu8_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__mmask32" varname="k1" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI8"/>
+ <parameter type="__m256i" varname="b" etype="UI8"/>
+ <description>Compare packed unsigned 8-bit integers in "a" and "b" for less-than, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*8
+ IF k1[j]
+ k[j] := ( a[i+7:i] &lt; b[i+7:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:32] := 0
+ </operation>
+ <instruction name="VPCMPUB" form="k {k}, ymm, ymm" xed="VPCMPUB_MASKmskw_MASKmskw_YMMu8_YMMu8_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cmpneq_epu8_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__mmask32" varname="k1" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI8"/>
+ <parameter type="__m256i" varname="b" etype="UI8"/>
+ <description>Compare packed unsigned 8-bit integers in "a" and "b" for not-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*8
+ IF k1[j]
+ k[j] := ( a[i+7:i] != b[i+7:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:32] := 0
+ </operation>
+ <instruction name="VPCMPUB" form="k {k}, ymm, ymm" xed="VPCMPUB_MASKmskw_MASKmskw_YMMu8_YMMu8_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_cmp_epu8_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask64" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI8"/>
+ <parameter type="__m512i" varname="b" etype="UI8"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immtype="_MM_CMPINT"/>
+ <description>Compare packed unsigned 8-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k".</description>
+ <operation>CASE (imm8[2:0]) OF
+0: OP := _MM_CMPINT_EQ
+1: OP := _MM_CMPINT_LT
+2: OP := _MM_CMPINT_LE
+3: OP := _MM_CMPINT_FALSE
+4: OP := _MM_CMPINT_NE
+5: OP := _MM_CMPINT_NLT
+6: OP := _MM_CMPINT_NLE
+7: OP := _MM_CMPINT_TRUE
+ESAC
+FOR j := 0 to 63
+ i := j*8
+ k[j] := ( a[i+7:i] OP b[i+7:i] ) ? 1 : 0
+ENDFOR
+k[MAX:64] := 0
+ </operation>
+ <instruction name="VPCMPUB" form="k, zmm, zmm, imm8" xed="VPCMPUB_MASKmskw_MASKmskw_ZMMu8_ZMMu8_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_cmpeq_epu8_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask64" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI8"/>
+ <parameter type="__m512i" varname="b" etype="UI8"/>
+ <description>Compare packed unsigned 8-bit integers in "a" and "b" for equality, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 63
+ i := j*8
+ k[j] := ( a[i+7:i] == b[i+7:i] ) ? 1 : 0
+ENDFOR
+k[MAX:64] := 0
+ </operation>
+ <instruction name="VPCMPUB" form="k, zmm, zmm" xed="VPCMPUB_MASKmskw_MASKmskw_ZMMu8_ZMMu8_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_cmpge_epu8_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask64" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI8"/>
+ <parameter type="__m512i" varname="b" etype="UI8"/>
+ <description>Compare packed unsigned 8-bit integers in "a" and "b" for greater-than-or-equal, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 63
+ i := j*8
+ k[j] := ( a[i+7:i] &gt;= b[i+7:i] ) ? 1 : 0
+ENDFOR
+k[MAX:64] := 0
+ </operation>
+ <instruction name="VPCMPUB" form="k, zmm, zmm" xed="VPCMPUB_MASKmskw_MASKmskw_ZMMu8_ZMMu8_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_cmpgt_epu8_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask64" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI8"/>
+ <parameter type="__m512i" varname="b" etype="UI8"/>
+ <description>Compare packed unsigned 8-bit integers in "a" and "b" for greater-than, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 63
+ i := j*8
+ k[j] := ( a[i+7:i] &gt; b[i+7:i] ) ? 1 : 0
+ENDFOR
+k[MAX:64] := 0
+ </operation>
+ <instruction name="VPCMPUB" form="k, zmm, zmm" xed="VPCMPUB_MASKmskw_MASKmskw_ZMMu8_ZMMu8_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_cmple_epu8_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask64" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI8"/>
+ <parameter type="__m512i" varname="b" etype="UI8"/>
+ <description>Compare packed unsigned 8-bit integers in "a" and "b" for less-than-or-equal, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 63
+ i := j*8
+ k[j] := ( a[i+7:i] &lt;= b[i+7:i] ) ? 1 : 0
+ENDFOR
+k[MAX:64] := 0
+ </operation>
+ <instruction name="VPCMPUB" form="k, zmm, zmm" xed="VPCMPUB_MASKmskw_MASKmskw_ZMMu8_ZMMu8_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_cmplt_epu8_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask64" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI8"/>
+ <parameter type="__m512i" varname="b" etype="UI8"/>
+ <description>Compare packed unsigned 8-bit integers in "a" and "b" for less-than, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 63
+ i := j*8
+ k[j] := ( a[i+7:i] &lt; b[i+7:i] ) ? 1 : 0
+ENDFOR
+k[MAX:64] := 0
+ </operation>
+ <instruction name="VPCMPUB" form="k, zmm, zmm" xed="VPCMPUB_MASKmskw_MASKmskw_ZMMu8_ZMMu8_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_cmpneq_epu8_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask64" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI8"/>
+ <parameter type="__m512i" varname="b" etype="UI8"/>
+ <description>Compare packed unsigned 8-bit integers in "a" and "b" for not-equal, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 63
+ i := j*8
+ k[j] := ( a[i+7:i] != b[i+7:i] ) ? 1 : 0
+ENDFOR
+k[MAX:64] := 0
+ </operation>
+ <instruction name="VPCMPUB" form="k, zmm, zmm" xed="VPCMPUB_MASKmskw_MASKmskw_ZMMu8_ZMMu8_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cmp_epu8_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask64" varname="k" etype="MASK"/>
+ <parameter type="__mmask64" varname="k1" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI8"/>
+ <parameter type="__m512i" varname="b" etype="UI8"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immtype="_MM_CMPINT"/>
+ <description>Compare packed unsigned 8-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>CASE (imm8[2:0]) OF
+0: OP := _MM_CMPINT_EQ
+1: OP := _MM_CMPINT_LT
+2: OP := _MM_CMPINT_LE
+3: OP := _MM_CMPINT_FALSE
+4: OP := _MM_CMPINT_NE
+5: OP := _MM_CMPINT_NLT
+6: OP := _MM_CMPINT_NLE
+7: OP := _MM_CMPINT_TRUE
+ESAC
+FOR j := 0 to 63
+ i := j*8
+ IF k1[j]
+ k[j] := ( a[i+7:i] OP b[i+7:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:64] := 0
+ </operation>
+ <instruction name="VPCMPUB" form="k {k}, zmm, zmm, imm8" xed="VPCMPUB_MASKmskw_MASKmskw_ZMMu8_ZMMu8_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cmpeq_epu8_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask64" varname="k" etype="MASK"/>
+ <parameter type="__mmask64" varname="k1" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI8"/>
+ <parameter type="__m512i" varname="b" etype="UI8"/>
+ <description>Compare packed unsigned 8-bit integers in "a" and "b" for equality, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 63
+ i := j*8
+ IF k1[j]
+ k[j] := ( a[i+7:i] == b[i+7:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:64] := 0
+ </operation>
+ <instruction name="VPCMPUB" form="k {k}, zmm, zmm" xed="VPCMPUB_MASKmskw_MASKmskw_ZMMu8_ZMMu8_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cmpge_epu8_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask64" varname="k" etype="MASK"/>
+ <parameter type="__mmask64" varname="k1" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI8"/>
+ <parameter type="__m512i" varname="b" etype="UI8"/>
+ <description>Compare packed unsigned 8-bit integers in "a" and "b" for greater-than-or-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 63
+ i := j*8
+ IF k1[j]
+ k[j] := ( a[i+7:i] &gt;= b[i+7:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:64] := 0
+ </operation>
+ <instruction name="VPCMPUB" form="k {k}, zmm, zmm" xed="VPCMPUB_MASKmskw_MASKmskw_ZMMu8_ZMMu8_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cmpgt_epu8_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask64" varname="k" etype="MASK"/>
+ <parameter type="__mmask64" varname="k1" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI8"/>
+ <parameter type="__m512i" varname="b" etype="UI8"/>
+ <description>Compare packed unsigned 8-bit integers in "a" and "b" for greater-than, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 63
+ i := j*8
+ IF k1[j]
+ k[j] := ( a[i+7:i] &gt; b[i+7:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:64] := 0
+ </operation>
+ <instruction name="VPCMPUB" form="k {k}, zmm, zmm" xed="VPCMPUB_MASKmskw_MASKmskw_ZMMu8_ZMMu8_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cmple_epu8_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask64" varname="k" etype="MASK"/>
+ <parameter type="__mmask64" varname="k1" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI8"/>
+ <parameter type="__m512i" varname="b" etype="UI8"/>
+ <description>Compare packed unsigned 8-bit integers in "a" and "b" for less-than-or-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 63
+ i := j*8
+ IF k1[j]
+ k[j] := ( a[i+7:i] &lt;= b[i+7:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:64] := 0
+ </operation>
+ <instruction name="VPCMPUB" form="k {k}, zmm, zmm" xed="VPCMPUB_MASKmskw_MASKmskw_ZMMu8_ZMMu8_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cmplt_epu8_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask64" varname="k" etype="MASK"/>
+ <parameter type="__mmask64" varname="k1" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI8"/>
+ <parameter type="__m512i" varname="b" etype="UI8"/>
+ <description>Compare packed unsigned 8-bit integers in "a" and "b" for less-than, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 63
+ i := j*8
+ IF k1[j]
+ k[j] := ( a[i+7:i] &lt; b[i+7:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:64] := 0
+ </operation>
+ <instruction name="VPCMPUB" form="k {k}, zmm, zmm" xed="VPCMPUB_MASKmskw_MASKmskw_ZMMu8_ZMMu8_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cmpneq_epu8_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask64" varname="k" etype="MASK"/>
+ <parameter type="__mmask64" varname="k1" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI8"/>
+ <parameter type="__m512i" varname="b" etype="UI8"/>
+ <description>Compare packed unsigned 8-bit integers in "a" and "b" for not-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 63
+ i := j*8
+ IF k1[j]
+ k[j] := ( a[i+7:i] != b[i+7:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:64] := 0
+ </operation>
+ <instruction name="VPCMPUB" form="k {k}, zmm, zmm" xed="VPCMPUB_MASKmskw_MASKmskw_ZMMu8_ZMMu8_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cmp_epu8_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
+ <parameter type="__m128i" varname="b" etype="UI8"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immtype="_MM_CMPINT"/>
+ <description>Compare packed unsigned 8-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k".</description>
+ <operation>CASE (imm8[2:0]) OF
+0: OP := _MM_CMPINT_EQ
+1: OP := _MM_CMPINT_LT
+2: OP := _MM_CMPINT_LE
+3: OP := _MM_CMPINT_FALSE
+4: OP := _MM_CMPINT_NE
+5: OP := _MM_CMPINT_NLT
+6: OP := _MM_CMPINT_NLE
+7: OP := _MM_CMPINT_TRUE
+ESAC
+FOR j := 0 to 15
+ i := j*8
+ k[j] := ( a[i+7:i] OP b[i+7:i] ) ? 1 : 0
+ENDFOR
+k[MAX:16] := 0
+ </operation>
+ <instruction name="VPCMPUB" form="k, xmm, xmm, imm8" xed="VPCMPUB_MASKmskw_MASKmskw_XMMu8_XMMu8_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cmpeq_epu8_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
+ <parameter type="__m128i" varname="b" etype="UI8"/>
+ <description>Compare packed unsigned 8-bit integers in "a" and "b" for equality, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*8
+ k[j] := ( a[i+7:i] == b[i+7:i] ) ? 1 : 0
+ENDFOR
+k[MAX:16] := 0
+ </operation>
+ <instruction name="VPCMPUB" form="k, xmm, xmm" xed="VPCMPUB_MASKmskw_MASKmskw_XMMu8_XMMu8_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cmpge_epu8_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
+ <parameter type="__m128i" varname="b" etype="UI8"/>
+ <description>Compare packed unsigned 8-bit integers in "a" and "b" for greater-than-or-equal, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*8
+ k[j] := ( a[i+7:i] &gt;= b[i+7:i] ) ? 1 : 0
+ENDFOR
+k[MAX:16] := 0
+ </operation>
+ <instruction name="VPCMPUB" form="k, xmm, xmm" xed="VPCMPUB_MASKmskw_MASKmskw_XMMu8_XMMu8_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cmpgt_epu8_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
+ <parameter type="__m128i" varname="b" etype="UI8"/>
+ <description>Compare packed unsigned 8-bit integers in "a" and "b" for greater-than, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*8
+ k[j] := ( a[i+7:i] &gt; b[i+7:i] ) ? 1 : 0
+ENDFOR
+k[MAX:16] := 0
+ </operation>
+ <instruction name="VPCMPUB" form="k, xmm, xmm" xed="VPCMPUB_MASKmskw_MASKmskw_XMMu8_XMMu8_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cmple_epu8_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
+ <parameter type="__m128i" varname="b" etype="UI8"/>
+ <description>Compare packed unsigned 8-bit integers in "a" and "b" for less-than-or-equal, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*8
+ k[j] := ( a[i+7:i] &lt;= b[i+7:i] ) ? 1 : 0
+ENDFOR
+k[MAX:16] := 0
+ </operation>
+ <instruction name="VPCMPUB" form="k, xmm, xmm" xed="VPCMPUB_MASKmskw_MASKmskw_XMMu8_XMMu8_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cmplt_epu8_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
+ <parameter type="__m128i" varname="b" etype="UI8"/>
+ <description>Compare packed unsigned 8-bit integers in "a" and "b" for less-than, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*8
+ k[j] := ( a[i+7:i] &lt; b[i+7:i] ) ? 1 : 0
+ENDFOR
+k[MAX:16] := 0
+ </operation>
+ <instruction name="VPCMPUB" form="k, xmm, xmm" xed="VPCMPUB_MASKmskw_MASKmskw_XMMu8_XMMu8_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cmpneq_epu8_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
+ <parameter type="__m128i" varname="b" etype="UI8"/>
+ <description>Compare packed unsigned 8-bit integers in "a" and "b" for not-equal, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*8
+ k[j] := ( a[i+7:i] != b[i+7:i] ) ? 1 : 0
+ENDFOR
+k[MAX:16] := 0
+ </operation>
+ <instruction name="VPCMPUB" form="k, xmm, xmm" xed="VPCMPUB_MASKmskw_MASKmskw_XMMu8_XMMu8_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cmp_epu8_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__mmask16" varname="k1" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
+ <parameter type="__m128i" varname="b" etype="UI8"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immtype="_MM_CMPINT"/>
+ <description>Compare packed unsigned 8-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>CASE (imm8[2:0]) OF
+0: OP := _MM_CMPINT_EQ
+1: OP := _MM_CMPINT_LT
+2: OP := _MM_CMPINT_LE
+3: OP := _MM_CMPINT_FALSE
+4: OP := _MM_CMPINT_NE
+5: OP := _MM_CMPINT_NLT
+6: OP := _MM_CMPINT_NLE
+7: OP := _MM_CMPINT_TRUE
+ESAC
+FOR j := 0 to 15
+ i := j*8
+ IF k1[j]
+ k[j] := ( a[i+7:i] OP b[i+7:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:16] := 0
+ </operation>
+ <instruction name="VPCMPUB" form="k {k}, xmm, xmm, imm8" xed="VPCMPUB_MASKmskw_MASKmskw_XMMu8_XMMu8_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cmpeq_epu8_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__mmask16" varname="k1" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
+ <parameter type="__m128i" varname="b" etype="UI8"/>
+ <description>Compare packed unsigned 8-bit integers in "a" and "b" for equality, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*8
+ IF k1[j]
+ k[j] := ( a[i+7:i] == b[i+7:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:16] := 0
+ </operation>
+ <instruction name="VPCMPUB" form="k {k}, xmm, xmm" xed="VPCMPUB_MASKmskw_MASKmskw_XMMu8_XMMu8_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cmpge_epu8_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__mmask16" varname="k1" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
+ <parameter type="__m128i" varname="b" etype="UI8"/>
+ <description>Compare packed unsigned 8-bit integers in "a" and "b" for greater-than-or-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*8
+ IF k1[j]
+ k[j] := ( a[i+7:i] &gt;= b[i+7:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:16] := 0
+ </operation>
+ <instruction name="VPCMPUB" form="k {k}, xmm, xmm" xed="VPCMPUB_MASKmskw_MASKmskw_XMMu8_XMMu8_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cmpgt_epu8_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__mmask16" varname="k1" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
+ <parameter type="__m128i" varname="b" etype="UI8"/>
+ <description>Compare packed unsigned 8-bit integers in "a" and "b" for greater-than, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*8
+ IF k1[j]
+ k[j] := ( a[i+7:i] &gt; b[i+7:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:16] := 0
+ </operation>
+ <instruction name="VPCMPUB" form="k {k}, xmm, xmm" xed="VPCMPUB_MASKmskw_MASKmskw_XMMu8_XMMu8_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cmple_epu8_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__mmask16" varname="k1" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
+ <parameter type="__m128i" varname="b" etype="UI8"/>
+ <description>Compare packed unsigned 8-bit integers in "a" and "b" for less-than-or-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*8
+ IF k1[j]
+ k[j] := ( a[i+7:i] &lt;= b[i+7:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:16] := 0
+ </operation>
+ <instruction name="VPCMPUB" form="k {k}, xmm, xmm" xed="VPCMPUB_MASKmskw_MASKmskw_XMMu8_XMMu8_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cmplt_epu8_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__mmask16" varname="k1" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
+ <parameter type="__m128i" varname="b" etype="UI8"/>
+ <description>Compare packed unsigned 8-bit integers in "a" and "b" for less-than, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*8
+ IF k1[j]
+ k[j] := ( a[i+7:i] &lt; b[i+7:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:16] := 0
+ </operation>
+ <instruction name="VPCMPUB" form="k {k}, xmm, xmm" xed="VPCMPUB_MASKmskw_MASKmskw_XMMu8_XMMu8_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cmpneq_epu8_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__mmask16" varname="k1" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
+ <parameter type="__m128i" varname="b" etype="UI8"/>
+ <description>Compare packed unsigned 8-bit integers in "a" and "b" for not-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*8
+ IF k1[j]
+ k[j] := ( a[i+7:i] != b[i+7:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:16] := 0
+ </operation>
+ <instruction name="VPCMPUB" form="k {k}, xmm, xmm" xed="VPCMPUB_MASKmskw_MASKmskw_XMMu8_XMMu8_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_cmp_epu16_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI16"/>
+ <parameter type="__m256i" varname="b" etype="UI16"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immtype="_MM_CMPINT"/>
+ <description>Compare packed unsigned 16-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k".</description>
+ <operation>CASE (imm8[2:0]) OF
+0: OP := _MM_CMPINT_EQ
+1: OP := _MM_CMPINT_LT
+2: OP := _MM_CMPINT_LE
+3: OP := _MM_CMPINT_FALSE
+4: OP := _MM_CMPINT_NE
+5: OP := _MM_CMPINT_NLT
+6: OP := _MM_CMPINT_NLE
+7: OP := _MM_CMPINT_TRUE
+ESAC
+FOR j := 0 to 15
+ i := j*16
+ k[j] := ( a[i+15:i] OP b[i+15:i] ) ? 1 : 0
+ENDFOR
+k[MAX:16] := 0
+ </operation>
+ <instruction name="VPCMPUW" form="k, ymm, ymm, imm8" xed="VPCMPUW_MASKmskw_MASKmskw_YMMu16_YMMu16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_cmpeq_epu16_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI16"/>
+ <parameter type="__m256i" varname="b" etype="UI16"/>
+ <description>Compare packed unsigned 16-bit integers in "a" and "b" for equality, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ k[j] := ( a[i+15:i] == b[i+15:i] ) ? 1 : 0
+ENDFOR
+k[MAX:16] := 0
+ </operation>
+ <instruction name="VPCMPUW" form="k, ymm, ymm" xed="VPCMPUW_MASKmskw_MASKmskw_YMMu16_YMMu16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_cmpge_epu16_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI16"/>
+ <parameter type="__m256i" varname="b" etype="UI16"/>
+ <description>Compare packed unsigned 16-bit integers in "a" and "b" for greater-than-or-equal, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ k[j] := ( a[i+15:i] &gt;= b[i+15:i] ) ? 1 : 0
+ENDFOR
+k[MAX:16] := 0
+ </operation>
+ <instruction name="VPCMPUW" form="k, ymm, ymm" xed="VPCMPUW_MASKmskw_MASKmskw_YMMu16_YMMu16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_cmpgt_epu16_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI16"/>
+ <parameter type="__m256i" varname="b" etype="UI16"/>
+ <description>Compare packed unsigned 16-bit integers in "a" and "b" for greater-than, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ k[j] := ( a[i+15:i] &gt; b[i+15:i] ) ? 1 : 0
+ENDFOR
+k[MAX:16] := 0
+ </operation>
+ <instruction name="VPCMPUW" form="k, ymm, ymm" xed="VPCMPUW_MASKmskw_MASKmskw_YMMu16_YMMu16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_cmple_epu16_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI16"/>
+ <parameter type="__m256i" varname="b" etype="UI16"/>
+ <description>Compare packed unsigned 16-bit integers in "a" and "b" for less-than-or-equal, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ k[j] := ( a[i+15:i] &lt;= b[i+15:i] ) ? 1 : 0
+ENDFOR
+k[MAX:16] := 0
+ </operation>
+ <instruction name="VPCMPUW" form="k, ymm, ymm" xed="VPCMPUW_MASKmskw_MASKmskw_YMMu16_YMMu16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_cmplt_epu16_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI16"/>
+ <parameter type="__m256i" varname="b" etype="UI16"/>
+ <description>Compare packed unsigned 16-bit integers in "a" and "b" for less-than, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ k[j] := ( a[i+15:i] &lt; b[i+15:i] ) ? 1 : 0
+ENDFOR
+k[MAX:16] := 0
+ </operation>
+ <instruction name="VPCMPUW" form="k, ymm, ymm" xed="VPCMPUW_MASKmskw_MASKmskw_YMMu16_YMMu16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_cmpneq_epu16_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI16"/>
+ <parameter type="__m256i" varname="b" etype="UI16"/>
+ <description>Compare packed unsigned 16-bit integers in "a" and "b" for not-equal, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ k[j] := ( a[i+15:i] != b[i+15:i] ) ? 1 : 0
+ENDFOR
+k[MAX:16] := 0
+ </operation>
+ <instruction name="VPCMPUW" form="k, ymm, ymm" xed="VPCMPUW_MASKmskw_MASKmskw_YMMu16_YMMu16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cmp_epu16_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__mmask16" varname="k1" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI16"/>
+ <parameter type="__m256i" varname="b" etype="UI16"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immtype="_MM_CMPINT"/>
+ <description>Compare packed unsigned 16-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>CASE (imm8[2:0]) OF
+0: OP := _MM_CMPINT_EQ
+1: OP := _MM_CMPINT_LT
+2: OP := _MM_CMPINT_LE
+3: OP := _MM_CMPINT_FALSE
+4: OP := _MM_CMPINT_NE
+5: OP := _MM_CMPINT_NLT
+6: OP := _MM_CMPINT_NLE
+7: OP := _MM_CMPINT_TRUE
+ESAC
+FOR j := 0 to 15
+ i := j*16
+ IF k1[j]
+ k[j] := ( a[i+15:i] OP b[i+15:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:16] := 0
+ </operation>
+ <instruction name="VPCMPUW" form="k {k}, ymm, ymm, imm8" xed="VPCMPUW_MASKmskw_MASKmskw_YMMu16_YMMu16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cmpeq_epu16_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__mmask16" varname="k1" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI16"/>
+ <parameter type="__m256i" varname="b" etype="UI16"/>
+ <description>Compare packed unsigned 16-bit integers in "a" and "b" for equality, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ IF k1[j]
+ k[j] := ( a[i+15:i] == b[i+15:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:16] := 0
+ </operation>
+ <instruction name="VPCMPUW" form="k {k}, ymm, ymm" xed="VPCMPUW_MASKmskw_MASKmskw_YMMu16_YMMu16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cmpge_epu16_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__mmask16" varname="k1" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI16"/>
+ <parameter type="__m256i" varname="b" etype="UI16"/>
+ <description>Compare packed unsigned 16-bit integers in "a" and "b" for greater-than-or-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ IF k1[j]
+ k[j] := ( a[i+15:i] &gt;= b[i+15:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:16] := 0
+ </operation>
+ <instruction name="VPCMPUW" form="k {k}, ymm, ymm" xed="VPCMPUW_MASKmskw_MASKmskw_YMMu16_YMMu16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cmpgt_epu16_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__mmask16" varname="k1" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI16"/>
+ <parameter type="__m256i" varname="b" etype="UI16"/>
+ <description>Compare packed unsigned 16-bit integers in "a" and "b" for greater-than, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ IF k1[j]
+ k[j] := ( a[i+15:i] &gt; b[i+15:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:16] := 0
+ </operation>
+ <instruction name="VPCMPUW" form="k {k}, ymm, ymm" xed="VPCMPUW_MASKmskw_MASKmskw_YMMu16_YMMu16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cmple_epu16_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__mmask16" varname="k1" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI16"/>
+ <parameter type="__m256i" varname="b" etype="UI16"/>
+ <description>Compare packed unsigned 16-bit integers in "a" and "b" for less-than-or-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ IF k1[j]
+ k[j] := ( a[i+15:i] &lt;= b[i+15:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:16] := 0
+ </operation>
+ <instruction name="VPCMPUW" form="k {k}, ymm, ymm" xed="VPCMPUW_MASKmskw_MASKmskw_YMMu16_YMMu16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cmplt_epu16_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__mmask16" varname="k1" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI16"/>
+ <parameter type="__m256i" varname="b" etype="UI16"/>
+ <description>Compare packed unsigned 16-bit integers in "a" and "b" for less-than, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ IF k1[j]
+ k[j] := ( a[i+15:i] &lt; b[i+15:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:16] := 0
+ </operation>
+ <instruction name="VPCMPUW" form="k {k}, ymm, ymm" xed="VPCMPUW_MASKmskw_MASKmskw_YMMu16_YMMu16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cmpneq_epu16_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__mmask16" varname="k1" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI16"/>
+ <parameter type="__m256i" varname="b" etype="UI16"/>
+ <description>Compare packed unsigned 16-bit integers in "a" and "b" for not-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ IF k1[j]
+ k[j] := ( a[i+15:i] != b[i+15:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:16] := 0
+ </operation>
+ <instruction name="VPCMPUW" form="k {k}, ymm, ymm" xed="VPCMPUW_MASKmskw_MASKmskw_YMMu16_YMMu16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_cmp_epu16_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI16"/>
+ <parameter type="__m512i" varname="b" etype="UI16"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immtype="_MM_CMPINT"/>
+ <description>Compare packed unsigned 16-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k".</description>
+ <operation>CASE (imm8[2:0]) OF
+0: OP := _MM_CMPINT_EQ
+1: OP := _MM_CMPINT_LT
+2: OP := _MM_CMPINT_LE
+3: OP := _MM_CMPINT_FALSE
+4: OP := _MM_CMPINT_NE
+5: OP := _MM_CMPINT_NLT
+6: OP := _MM_CMPINT_NLE
+7: OP := _MM_CMPINT_TRUE
+ESAC
+FOR j := 0 to 31
+ i := j*16
+ k[j] := ( a[i+15:i] OP b[i+15:i] ) ? 1 : 0
+ENDFOR
+k[MAX:32] := 0
+ </operation>
+ <instruction name="VPCMPUW" form="k, zmm, zmm, imm8" xed="VPCMPUW_MASKmskw_MASKmskw_ZMMu16_ZMMu16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_cmpeq_epu16_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI16"/>
+ <parameter type="__m512i" varname="b" etype="UI16"/>
+ <description>Compare packed unsigned 16-bit integers in "a" and "b" for equality, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ k[j] := ( a[i+15:i] == b[i+15:i] ) ? 1 : 0
+ENDFOR
+k[MAX:32] := 0
+ </operation>
+ <instruction name="VPCMPUW" form="k, zmm, zmm" xed="VPCMPUW_MASKmskw_MASKmskw_ZMMu16_ZMMu16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_cmpge_epu16_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI16"/>
+ <parameter type="__m512i" varname="b" etype="UI16"/>
+ <description>Compare packed unsigned 16-bit integers in "a" and "b" for greater-than-or-equal, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ k[j] := ( a[i+15:i] &gt;= b[i+15:i] ) ? 1 : 0
+ENDFOR
+k[MAX:32] := 0
+ </operation>
+ <instruction name="VPCMPUW" form="k, zmm, zmm" xed="VPCMPUW_MASKmskw_MASKmskw_ZMMu16_ZMMu16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_cmpgt_epu16_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI16"/>
+ <parameter type="__m512i" varname="b" etype="UI16"/>
+ <description>Compare packed unsigned 16-bit integers in "a" and "b" for greater-than, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ k[j] := ( a[i+15:i] &gt; b[i+15:i] ) ? 1 : 0
+ENDFOR
+k[MAX:32] := 0
+ </operation>
+ <instruction name="VPCMPUW" form="k, zmm, zmm" xed="VPCMPUW_MASKmskw_MASKmskw_ZMMu16_ZMMu16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_cmple_epu16_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI16"/>
+ <parameter type="__m512i" varname="b" etype="UI16"/>
+ <description>Compare packed unsigned 16-bit integers in "a" and "b" for less-than-or-equal, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ k[j] := ( a[i+15:i] &lt;= b[i+15:i] ) ? 1 : 0
+ENDFOR
+k[MAX:32] := 0
+ </operation>
+ <instruction name="VPCMPUW" form="k, zmm, zmm" xed="VPCMPUW_MASKmskw_MASKmskw_ZMMu16_ZMMu16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_cmplt_epu16_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI16"/>
+ <parameter type="__m512i" varname="b" etype="UI16"/>
+ <description>Compare packed unsigned 16-bit integers in "a" and "b" for less-than, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ k[j] := ( a[i+15:i] &lt; b[i+15:i] ) ? 1 : 0
+ENDFOR
+k[MAX:32] := 0
+ </operation>
+ <instruction name="VPCMPUW" form="k, zmm, zmm" xed="VPCMPUW_MASKmskw_MASKmskw_ZMMu16_ZMMu16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_cmpneq_epu16_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI16"/>
+ <parameter type="__m512i" varname="b" etype="UI16"/>
+ <description>Compare packed unsigned 16-bit integers in "a" and "b" for not-equal, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ k[j] := ( a[i+15:i] != b[i+15:i] ) ? 1 : 0
+ENDFOR
+k[MAX:32] := 0
+ </operation>
+ <instruction name="VPCMPUW" form="k, zmm, zmm" xed="VPCMPUW_MASKmskw_MASKmskw_ZMMu16_ZMMu16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cmp_epu16_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__mmask32" varname="k1" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI16"/>
+ <parameter type="__m512i" varname="b" etype="UI16"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immtype="_MM_CMPINT"/>
+ <description>Compare packed unsigned 16-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>CASE (imm8[2:0]) OF
+0: OP := _MM_CMPINT_EQ
+1: OP := _MM_CMPINT_LT
+2: OP := _MM_CMPINT_LE
+3: OP := _MM_CMPINT_FALSE
+4: OP := _MM_CMPINT_NE
+5: OP := _MM_CMPINT_NLT
+6: OP := _MM_CMPINT_NLE
+7: OP := _MM_CMPINT_TRUE
+ESAC
+FOR j := 0 to 31
+ i := j*16
+ IF k1[j]
+ k[j] := ( a[i+15:i] OP b[i+15:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:32] := 0
+ </operation>
+ <instruction name="VPCMPUW" form="k {k}, zmm, zmm, imm8" xed="VPCMPUW_MASKmskw_MASKmskw_ZMMu16_ZMMu16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cmpeq_epu16_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__mmask32" varname="k1" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI16"/>
+ <parameter type="__m512i" varname="b" etype="UI16"/>
+ <description>Compare packed unsigned 16-bit integers in "a" and "b" for equality, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ IF k1[j]
+ k[j] := ( a[i+15:i] == b[i+15:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:32] := 0
+ </operation>
+ <instruction name="VPCMPUW" form="k {k}, zmm, zmm" xed="VPCMPUW_MASKmskw_MASKmskw_ZMMu16_ZMMu16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cmpge_epu16_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__mmask32" varname="k1" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI16"/>
+ <parameter type="__m512i" varname="b" etype="UI16"/>
+ <description>Compare packed unsigned 16-bit integers in "a" and "b" for greater-than-or-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ IF k1[j]
+ k[j] := ( a[i+15:i] &gt;= b[i+15:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:32] := 0
+ </operation>
+ <instruction name="VPCMPUW" form="k {k}, zmm, zmm" xed="VPCMPUW_MASKmskw_MASKmskw_ZMMu16_ZMMu16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cmpgt_epu16_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__mmask32" varname="k1" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI16"/>
+ <parameter type="__m512i" varname="b" etype="UI16"/>
+ <description>Compare packed unsigned 16-bit integers in "a" and "b" for greater-than, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ IF k1[j]
+ k[j] := ( a[i+15:i] &gt; b[i+15:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:32] := 0
+ </operation>
+ <instruction name="VPCMPUW" form="k {k}, zmm, zmm" xed="VPCMPUW_MASKmskw_MASKmskw_ZMMu16_ZMMu16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cmple_epu16_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__mmask32" varname="k1" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI16"/>
+ <parameter type="__m512i" varname="b" etype="UI16"/>
+ <description>Compare packed unsigned 16-bit integers in "a" and "b" for less-than-or-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ IF k1[j]
+ k[j] := ( a[i+15:i] &lt;= b[i+15:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:32] := 0
+ </operation>
+ <instruction name="VPCMPUW" form="k {k}, zmm, zmm" xed="VPCMPUW_MASKmskw_MASKmskw_ZMMu16_ZMMu16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cmplt_epu16_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__mmask32" varname="k1" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI16"/>
+ <parameter type="__m512i" varname="b" etype="UI16"/>
+ <description>Compare packed unsigned 16-bit integers in "a" and "b" for less-than, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ IF k1[j]
+ k[j] := ( a[i+15:i] &lt; b[i+15:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:32] := 0
+ </operation>
+ <instruction name="VPCMPUW" form="k {k}, zmm, zmm" xed="VPCMPUW_MASKmskw_MASKmskw_ZMMu16_ZMMu16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cmpneq_epu16_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__mmask32" varname="k1" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI16"/>
+ <parameter type="__m512i" varname="b" etype="UI16"/>
+ <description>Compare packed unsigned 16-bit integers in "a" and "b" for not-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ IF k1[j]
+ k[j] := ( a[i+15:i] != b[i+15:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:32] := 0
+ </operation>
+ <instruction name="VPCMPUW" form="k {k}, zmm, zmm" xed="VPCMPUW_MASKmskw_MASKmskw_ZMMu16_ZMMu16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cmp_epu16_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <parameter type="__m128i" varname="b" etype="UI16"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immtype="_MM_CMPINT"/>
+ <description>Compare packed unsigned 16-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k".</description>
+ <operation>CASE (imm8[2:0]) OF
+0: OP := _MM_CMPINT_EQ
+1: OP := _MM_CMPINT_LT
+2: OP := _MM_CMPINT_LE
+3: OP := _MM_CMPINT_FALSE
+4: OP := _MM_CMPINT_NE
+5: OP := _MM_CMPINT_NLT
+6: OP := _MM_CMPINT_NLE
+7: OP := _MM_CMPINT_TRUE
+ESAC
+FOR j := 0 to 7
+ i := j*16
+ k[j] := ( a[i+15:i] OP b[i+15:i] ) ? 1 : 0
+ENDFOR
+k[MAX:8] := 0
+ </operation>
+ <instruction name="VPCMPUW" form="k, xmm, xmm, imm8" xed="VPCMPUW_MASKmskw_MASKmskw_XMMu16_XMMu16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cmpeq_epu16_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <parameter type="__m128i" varname="b" etype="UI16"/>
+ <description>Compare packed unsigned 16-bit integers in "a" and "b" for equality, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ k[j] := ( a[i+15:i] == b[i+15:i] ) ? 1 : 0
+ENDFOR
+k[MAX:8] := 0
+ </operation>
+ <instruction name="VPCMPUW" form="k, xmm, xmm" xed="VPCMPUW_MASKmskw_MASKmskw_XMMu16_XMMu16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cmpge_epu16_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <parameter type="__m128i" varname="b" etype="UI16"/>
+ <description>Compare packed unsigned 16-bit integers in "a" and "b" for greater-than-or-equal, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ k[j] := ( a[i+15:i] &gt;= b[i+15:i] ) ? 1 : 0
+ENDFOR
+k[MAX:8] := 0
+ </operation>
+ <instruction name="VPCMPUW" form="k, xmm, xmm" xed="VPCMPUW_MASKmskw_MASKmskw_XMMu16_XMMu16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cmpgt_epu16_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <parameter type="__m128i" varname="b" etype="UI16"/>
+ <description>Compare packed unsigned 16-bit integers in "a" and "b" for greater-than, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ k[j] := ( a[i+15:i] &gt; b[i+15:i] ) ? 1 : 0
+ENDFOR
+k[MAX:8] := 0
+ </operation>
+ <instruction name="VPCMPUW" form="k, xmm, xmm" xed="VPCMPUW_MASKmskw_MASKmskw_XMMu16_XMMu16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cmple_epu16_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <parameter type="__m128i" varname="b" etype="UI16"/>
+ <description>Compare packed unsigned 16-bit integers in "a" and "b" for less-than-or-equal, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ k[j] := ( a[i+15:i] &lt;= b[i+15:i] ) ? 1 : 0
+ENDFOR
+k[MAX:8] := 0
+ </operation>
+ <instruction name="VPCMPUW" form="k, xmm, xmm" xed="VPCMPUW_MASKmskw_MASKmskw_XMMu16_XMMu16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cmplt_epu16_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <parameter type="__m128i" varname="b" etype="UI16"/>
+ <description>Compare packed unsigned 16-bit integers in "a" and "b" for less-than, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ k[j] := ( a[i+15:i] &lt; b[i+15:i] ) ? 1 : 0
+ENDFOR
+k[MAX:8] := 0
+ </operation>
+ <instruction name="VPCMPUW" form="k, xmm, xmm" xed="VPCMPUW_MASKmskw_MASKmskw_XMMu16_XMMu16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cmpneq_epu16_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <parameter type="__m128i" varname="b" etype="UI16"/>
+ <description>Compare packed unsigned 16-bit integers in "a" and "b" for not-equal, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ k[j] := ( a[i+15:i] != b[i+15:i] ) ? 1 : 0
+ENDFOR
+k[MAX:8] := 0
+ </operation>
+ <instruction name="VPCMPUW" form="k, xmm, xmm" xed="VPCMPUW_MASKmskw_MASKmskw_XMMu16_XMMu16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cmp_epu16_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__mmask8" varname="k1" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <parameter type="__m128i" varname="b" etype="UI16"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immtype="_MM_CMPINT"/>
+ <description>Compare packed unsigned 16-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>CASE (imm8[2:0]) OF
+0: OP := _MM_CMPINT_EQ
+1: OP := _MM_CMPINT_LT
+2: OP := _MM_CMPINT_LE
+3: OP := _MM_CMPINT_FALSE
+4: OP := _MM_CMPINT_NE
+5: OP := _MM_CMPINT_NLT
+6: OP := _MM_CMPINT_NLE
+7: OP := _MM_CMPINT_TRUE
+ESAC
+FOR j := 0 to 7
+ i := j*16
+ IF k1[j]
+ k[j] := ( a[i+15:i] OP b[i+15:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:8] := 0
+ </operation>
+ <instruction name="VPCMPUW" form="k {k}, xmm, xmm, imm8" xed="VPCMPUW_MASKmskw_MASKmskw_XMMu16_XMMu16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cmpeq_epu16_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__mmask8" varname="k1" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <parameter type="__m128i" varname="b" etype="UI16"/>
+ <description>Compare packed unsigned 16-bit integers in "a" and "b" for equality, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ IF k1[j]
+ k[j] := ( a[i+15:i] == b[i+15:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:8] := 0
+ </operation>
+ <instruction name="VPCMPUW" form="k {k}, xmm, xmm" xed="VPCMPUW_MASKmskw_MASKmskw_XMMu16_XMMu16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cmpge_epu16_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__mmask8" varname="k1" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <parameter type="__m128i" varname="b" etype="UI16"/>
+ <description>Compare packed unsigned 16-bit integers in "a" and "b" for greater-than-or-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ IF k1[j]
+ k[j] := ( a[i+15:i] &gt;= b[i+15:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:8] := 0
+ </operation>
+ <instruction name="VPCMPUW" form="k {k}, xmm, xmm" xed="VPCMPUW_MASKmskw_MASKmskw_XMMu16_XMMu16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cmpgt_epu16_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__mmask8" varname="k1" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <parameter type="__m128i" varname="b" etype="UI16"/>
+ <description>Compare packed unsigned 16-bit integers in "a" and "b" for greater-than, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ IF k1[j]
+ k[j] := ( a[i+15:i] &gt; b[i+15:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:8] := 0
+ </operation>
+ <instruction name="VPCMPUW" form="k {k}, xmm, xmm" xed="VPCMPUW_MASKmskw_MASKmskw_XMMu16_XMMu16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cmple_epu16_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__mmask8" varname="k1" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <parameter type="__m128i" varname="b" etype="UI16"/>
+ <description>Compare packed unsigned 16-bit integers in "a" and "b" for less-than-or-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ IF k1[j]
+ k[j] := ( a[i+15:i] &lt;= b[i+15:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:8] := 0
+ </operation>
+ <instruction name="VPCMPUW" form="k {k}, xmm, xmm" xed="VPCMPUW_MASKmskw_MASKmskw_XMMu16_XMMu16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cmplt_epu16_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__mmask8" varname="k1" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <parameter type="__m128i" varname="b" etype="UI16"/>
+ <description>Compare packed unsigned 16-bit integers in "a" and "b" for less-than, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ IF k1[j]
+ k[j] := ( a[i+15:i] &lt; b[i+15:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:8] := 0
+ </operation>
+ <instruction name="VPCMPUW" form="k {k}, xmm, xmm" xed="VPCMPUW_MASKmskw_MASKmskw_XMMu16_XMMu16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cmpneq_epu16_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__mmask8" varname="k1" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <parameter type="__m128i" varname="b" etype="UI16"/>
+ <description>Compare packed unsigned 16-bit integers in "a" and "b" for not-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ IF k1[j]
+ k[j] := ( a[i+15:i] != b[i+15:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:8] := 0
+ </operation>
+ <instruction name="VPCMPUW" form="k {k}, xmm, xmm" xed="VPCMPUW_MASKmskw_MASKmskw_XMMu16_XMMu16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_cmp_epi16_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI16"/>
+ <parameter type="__m256i" varname="b" etype="SI16"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immtype="_MM_CMPINT"/>
+ <description>Compare packed signed 16-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k".</description>
+ <operation>CASE (imm8[2:0]) OF
+0: OP := _MM_CMPINT_EQ
+1: OP := _MM_CMPINT_LT
+2: OP := _MM_CMPINT_LE
+3: OP := _MM_CMPINT_FALSE
+4: OP := _MM_CMPINT_NE
+5: OP := _MM_CMPINT_NLT
+6: OP := _MM_CMPINT_NLE
+7: OP := _MM_CMPINT_TRUE
+ESAC
+FOR j := 0 to 15
+ i := j*16
+ k[j] := ( a[i+15:i] OP b[i+15:i] ) ? 1 : 0
+ENDFOR
+k[MAX:16] := 0
+ </operation>
+ <instruction name="VPCMPW" form="k, ymm, ymm, imm8" xed="VPCMPW_MASKmskw_MASKmskw_YMMi16_YMMi16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_cmpeq_epi16_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI16"/>
+ <parameter type="__m256i" varname="b" etype="SI16"/>
+ <description>Compare packed signed 16-bit integers in "a" and "b" for equality, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ k[j] := ( a[i+15:i] == b[i+15:i] ) ? 1 : 0
+ENDFOR
+k[MAX:16] := 0
+ </operation>
+ <instruction name="VPCMPW" form="k, ymm, ymm" xed="VPCMPW_MASKmskw_MASKmskw_YMMi16_YMMi16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_cmpge_epi16_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI16"/>
+ <parameter type="__m256i" varname="b" etype="SI16"/>
+ <description>Compare packed signed 16-bit integers in "a" and "b" for greater-than-or-equal, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ k[j] := ( a[i+15:i] &gt;= b[i+15:i] ) ? 1 : 0
+ENDFOR
+k[MAX:16] := 0
+ </operation>
+ <instruction name="VPCMPW" form="k, ymm, ymm" xed="VPCMPW_MASKmskw_MASKmskw_YMMi16_YMMi16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_cmpgt_epi16_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI16"/>
+ <parameter type="__m256i" varname="b" etype="SI16"/>
+ <description>Compare packed signed 16-bit integers in "a" and "b" for greater-than, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ k[j] := ( a[i+15:i] &gt; b[i+15:i] ) ? 1 : 0
+ENDFOR
+k[MAX:16] := 0
+ </operation>
+ <instruction name="VPCMPW" form="k, ymm, ymm" xed="VPCMPW_MASKmskw_MASKmskw_YMMi16_YMMi16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_cmple_epi16_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI16"/>
+ <parameter type="__m256i" varname="b" etype="SI16"/>
+ <description>Compare packed signed 16-bit integers in "a" and "b" for less-than-or-equal, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ k[j] := ( a[i+15:i] &lt;= b[i+15:i] ) ? 1 : 0
+ENDFOR
+k[MAX:16] := 0
+ </operation>
+ <instruction name="VPCMPW" form="k, ymm, ymm" xed="VPCMPW_MASKmskw_MASKmskw_YMMi16_YMMi16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_cmplt_epi16_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI16"/>
+ <parameter type="__m256i" varname="b" etype="SI16"/>
+ <description>Compare packed signed 16-bit integers in "a" and "b" for less-than, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ k[j] := ( a[i+15:i] &lt; b[i+15:i] ) ? 1 : 0
+ENDFOR
+k[MAX:16] := 0
+ </operation>
+ <instruction name="VPCMPW" form="k, ymm, ymm" xed="VPCMPW_MASKmskw_MASKmskw_YMMi16_YMMi16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_cmpneq_epi16_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI16"/>
+ <parameter type="__m256i" varname="b" etype="SI16"/>
+ <description>Compare packed signed 16-bit integers in "a" and "b" for not-equal, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ k[j] := ( a[i+15:i] != b[i+15:i] ) ? 1 : 0
+ENDFOR
+k[MAX:16] := 0
+ </operation>
+ <instruction name="VPCMPW" form="k, ymm, ymm" xed="VPCMPW_MASKmskw_MASKmskw_YMMi16_YMMi16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cmp_epi16_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__mmask16" varname="k1" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI16"/>
+ <parameter type="__m256i" varname="b" etype="SI16"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immtype="_MM_CMPINT"/>
+ <description>Compare packed signed 16-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>CASE (imm8[2:0]) OF
+0: OP := _MM_CMPINT_EQ
+1: OP := _MM_CMPINT_LT
+2: OP := _MM_CMPINT_LE
+3: OP := _MM_CMPINT_FALSE
+4: OP := _MM_CMPINT_NE
+5: OP := _MM_CMPINT_NLT
+6: OP := _MM_CMPINT_NLE
+7: OP := _MM_CMPINT_TRUE
+ESAC
+FOR j := 0 to 15
+ i := j*16
+ IF k1[j]
+ k[j] := ( a[i+15:i] OP b[i+15:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:16] := 0
+ </operation>
+ <instruction name="VPCMPW" form="k {k}, ymm, ymm, imm8" xed="VPCMPW_MASKmskw_MASKmskw_YMMi16_YMMi16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cmpeq_epi16_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__mmask16" varname="k1" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI16"/>
+ <parameter type="__m256i" varname="b" etype="SI16"/>
+ <description>Compare packed signed 16-bit integers in "a" and "b" for equality, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ IF k1[j]
+ k[j] := ( a[i+15:i] == b[i+15:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:16] := 0
+ </operation>
+ <instruction name="VPCMPW" form="k {k}, ymm, ymm" xed="VPCMPW_MASKmskw_MASKmskw_YMMi16_YMMi16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cmpge_epi16_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__mmask16" varname="k1" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI16"/>
+ <parameter type="__m256i" varname="b" etype="SI16"/>
+ <description>Compare packed signed 16-bit integers in "a" and "b" for greater-than-or-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ IF k1[j]
+ k[j] := ( a[i+15:i] &gt;= b[i+15:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:16] := 0
+ </operation>
+ <instruction name="VPCMPW" form="k {k}, ymm, ymm" xed="VPCMPW_MASKmskw_MASKmskw_YMMi16_YMMi16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cmpgt_epi16_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__mmask16" varname="k1" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI16"/>
+ <parameter type="__m256i" varname="b" etype="SI16"/>
+ <description>Compare packed signed 16-bit integers in "a" and "b" for greater-than, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ IF k1[j]
+ k[j] := ( a[i+15:i] &gt; b[i+15:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:16] := 0
+ </operation>
+ <instruction name="VPCMPW" form="k {k}, ymm, ymm" xed="VPCMPW_MASKmskw_MASKmskw_YMMi16_YMMi16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cmple_epi16_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__mmask16" varname="k1" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI16"/>
+ <parameter type="__m256i" varname="b" etype="SI16"/>
+ <description>Compare packed signed 16-bit integers in "a" and "b" for less-than-or-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ IF k1[j]
+ k[j] := ( a[i+15:i] &lt;= b[i+15:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:16] := 0
+ </operation>
+ <instruction name="VPCMPW" form="k {k}, ymm, ymm" xed="VPCMPW_MASKmskw_MASKmskw_YMMi16_YMMi16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cmplt_epi16_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__mmask16" varname="k1" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI16"/>
+ <parameter type="__m256i" varname="b" etype="SI16"/>
+ <description>Compare packed signed 16-bit integers in "a" and "b" for less-than, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ IF k1[j]
+ k[j] := ( a[i+15:i] &lt; b[i+15:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:16] := 0
+ </operation>
+ <instruction name="VPCMPW" form="k {k}, ymm, ymm" xed="VPCMPW_MASKmskw_MASKmskw_YMMi16_YMMi16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cmpneq_epi16_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__mmask16" varname="k1" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI16"/>
+ <parameter type="__m256i" varname="b" etype="SI16"/>
+ <description>Compare packed signed 16-bit integers in "a" and "b" for not-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ IF k1[j]
+ k[j] := ( a[i+15:i] != b[i+15:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:16] := 0
+ </operation>
+ <instruction name="VPCMPW" form="k {k}, ymm, ymm" xed="VPCMPW_MASKmskw_MASKmskw_YMMi16_YMMi16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_cmp_epi16_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI16"/>
+ <parameter type="__m512i" varname="b" etype="SI16"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immtype="_MM_CMPINT"/>
+ <description>Compare packed signed 16-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k".</description>
+ <operation>CASE (imm8[2:0]) OF
+0: OP := _MM_CMPINT_EQ
+1: OP := _MM_CMPINT_LT
+2: OP := _MM_CMPINT_LE
+3: OP := _MM_CMPINT_FALSE
+4: OP := _MM_CMPINT_NE
+5: OP := _MM_CMPINT_NLT
+6: OP := _MM_CMPINT_NLE
+7: OP := _MM_CMPINT_TRUE
+ESAC
+FOR j := 0 to 31
+ i := j*16
+ k[j] := ( a[i+15:i] OP b[i+15:i] ) ? 1 : 0
+ENDFOR
+k[MAX:32] := 0
+ </operation>
+ <instruction name="VPCMPW" form="k, zmm, zmm, imm8" xed="VPCMPW_MASKmskw_MASKmskw_ZMMi16_ZMMi16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_cmpeq_epi16_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI16"/>
+ <parameter type="__m512i" varname="b" etype="SI16"/>
+ <description>Compare packed signed 16-bit integers in "a" and "b" for equality, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ k[j] := ( a[i+15:i] == b[i+15:i] ) ? 1 : 0
+ENDFOR
+k[MAX:32] := 0
+ </operation>
+ <instruction name="VPCMPW" form="k, zmm, zmm" xed="VPCMPW_MASKmskw_MASKmskw_ZMMi16_ZMMi16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_cmpge_epi16_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI16"/>
+ <parameter type="__m512i" varname="b" etype="SI16"/>
+ <description>Compare packed signed 16-bit integers in "a" and "b" for greater-than-or-equal, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ k[j] := ( a[i+15:i] &gt;= b[i+15:i] ) ? 1 : 0
+ENDFOR
+k[MAX:32] := 0
+ </operation>
+ <instruction name="VPCMPW" form="k, zmm, zmm" xed="VPCMPW_MASKmskw_MASKmskw_ZMMi16_ZMMi16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_cmpgt_epi16_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI16"/>
+ <parameter type="__m512i" varname="b" etype="SI16"/>
+ <description>Compare packed signed 16-bit integers in "a" and "b" for greater-than, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ k[j] := ( a[i+15:i] &gt; b[i+15:i] ) ? 1 : 0
+ENDFOR
+k[MAX:32] := 0
+ </operation>
+ <instruction name="VPCMPW" form="k, zmm, zmm" xed="VPCMPW_MASKmskw_MASKmskw_ZMMi16_ZMMi16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_cmple_epi16_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI16"/>
+ <parameter type="__m512i" varname="b" etype="SI16"/>
+ <description>Compare packed signed 16-bit integers in "a" and "b" for less-than-or-equal, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ k[j] := ( a[i+15:i] &lt;= b[i+15:i] ) ? 1 : 0
+ENDFOR
+k[MAX:32] := 0
+ </operation>
+ <instruction name="VPCMPW" form="k, zmm, zmm" xed="VPCMPW_MASKmskw_MASKmskw_ZMMi16_ZMMi16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_cmplt_epi16_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI16"/>
+ <parameter type="__m512i" varname="b" etype="SI16"/>
+ <description>Compare packed signed 16-bit integers in "a" and "b" for less-than, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ k[j] := ( a[i+15:i] &lt; b[i+15:i] ) ? 1 : 0
+ENDFOR
+k[MAX:32] := 0
+ </operation>
+ <instruction name="VPCMPW" form="k, zmm, zmm" xed="VPCMPW_MASKmskw_MASKmskw_ZMMi16_ZMMi16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_cmpneq_epi16_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI16"/>
+ <parameter type="__m512i" varname="b" etype="SI16"/>
+ <description>Compare packed signed 16-bit integers in "a" and "b" for not-equal, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ k[j] := ( a[i+15:i] != b[i+15:i] ) ? 1 : 0
+ENDFOR
+k[MAX:32] := 0
+ </operation>
+ <instruction name="VPCMPW" form="k, zmm, zmm" xed="VPCMPW_MASKmskw_MASKmskw_ZMMi16_ZMMi16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cmp_epi16_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__mmask32" varname="k1" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI16"/>
+ <parameter type="__m512i" varname="b" etype="SI16"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immtype="_MM_CMPINT"/>
+ <description>Compare packed signed 16-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>CASE (imm8[2:0]) OF
+0: OP := _MM_CMPINT_EQ
+1: OP := _MM_CMPINT_LT
+2: OP := _MM_CMPINT_LE
+3: OP := _MM_CMPINT_FALSE
+4: OP := _MM_CMPINT_NE
+5: OP := _MM_CMPINT_NLT
+6: OP := _MM_CMPINT_NLE
+7: OP := _MM_CMPINT_TRUE
+ESAC
+FOR j := 0 to 31
+ i := j*16
+ IF k1[j]
+ k[j] := ( a[i+15:i] OP b[i+15:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:32] := 0
+ </operation>
+ <instruction name="VPCMPW" form="k {k}, zmm, zmm, imm8" xed="VPCMPW_MASKmskw_MASKmskw_ZMMi16_ZMMi16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cmpeq_epi16_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__mmask32" varname="k1" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI16"/>
+ <parameter type="__m512i" varname="b" etype="SI16"/>
+ <description>Compare packed signed 16-bit integers in "a" and "b" for equality, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ IF k1[j]
+ k[j] := ( a[i+15:i] == b[i+15:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:32] := 0
+ </operation>
+ <instruction name="VPCMPW" form="k {k}, zmm, zmm" xed="VPCMPW_MASKmskw_MASKmskw_ZMMi16_ZMMi16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cmpge_epi16_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__mmask32" varname="k1" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI16"/>
+ <parameter type="__m512i" varname="b" etype="SI16"/>
+ <description>Compare packed signed 16-bit integers in "a" and "b" for greater-than-or-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ IF k1[j]
+ k[j] := ( a[i+15:i] &gt;= b[i+15:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:32] := 0
+ </operation>
+ <instruction name="VPCMPW" form="k {k}, zmm, zmm" xed="VPCMPW_MASKmskw_MASKmskw_ZMMi16_ZMMi16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cmpgt_epi16_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__mmask32" varname="k1" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI16"/>
+ <parameter type="__m512i" varname="b" etype="SI16"/>
+ <description>Compare packed signed 16-bit integers in "a" and "b" for greater-than, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ IF k1[j]
+ k[j] := ( a[i+15:i] &gt; b[i+15:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:32] := 0
+ </operation>
+ <instruction name="VPCMPW" form="k {k}, zmm, zmm" xed="VPCMPW_MASKmskw_MASKmskw_ZMMi16_ZMMi16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cmple_epi16_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__mmask32" varname="k1" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI16"/>
+ <parameter type="__m512i" varname="b" etype="SI16"/>
+ <description>Compare packed signed 16-bit integers in "a" and "b" for less-than-or-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ IF k1[j]
+ k[j] := ( a[i+15:i] &lt;= b[i+15:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:32] := 0
+ </operation>
+ <instruction name="VPCMPW" form="k {k}, zmm, zmm" xed="VPCMPW_MASKmskw_MASKmskw_ZMMi16_ZMMi16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cmplt_epi16_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__mmask32" varname="k1" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI16"/>
+ <parameter type="__m512i" varname="b" etype="SI16"/>
+ <description>Compare packed signed 16-bit integers in "a" and "b" for less-than, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ IF k1[j]
+ k[j] := ( a[i+15:i] &lt; b[i+15:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:32] := 0
+ </operation>
+ <instruction name="VPCMPW" form="k {k}, zmm, zmm" xed="VPCMPW_MASKmskw_MASKmskw_ZMMi16_ZMMi16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cmpneq_epi16_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__mmask32" varname="k1" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI16"/>
+ <parameter type="__m512i" varname="b" etype="SI16"/>
+ <description>Compare packed signed 16-bit integers in "a" and "b" for not-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ IF k1[j]
+ k[j] := ( a[i+15:i] != b[i+15:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:32] := 0
+ </operation>
+ <instruction name="VPCMPW" form="k {k}, zmm, zmm" xed="VPCMPW_MASKmskw_MASKmskw_ZMMi16_ZMMi16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cmp_epi16_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI16"/>
+ <parameter type="__m128i" varname="b" etype="SI16"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immtype="_MM_CMPINT"/>
+ <description>Compare packed signed 16-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k".</description>
+ <operation>CASE (imm8[2:0]) OF
+0: OP := _MM_CMPINT_EQ
+1: OP := _MM_CMPINT_LT
+2: OP := _MM_CMPINT_LE
+3: OP := _MM_CMPINT_FALSE
+4: OP := _MM_CMPINT_NE
+5: OP := _MM_CMPINT_NLT
+6: OP := _MM_CMPINT_NLE
+7: OP := _MM_CMPINT_TRUE
+ESAC
+FOR j := 0 to 7
+ i := j*16
+ k[j] := ( a[i+15:i] OP b[i+15:i] ) ? 1 : 0
+ENDFOR
+k[MAX:8] := 0
+ </operation>
+ <instruction name="VPCMPW" form="k, xmm, xmm, imm8" xed="VPCMPW_MASKmskw_MASKmskw_XMMi16_XMMi16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cmpeq_epi16_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI16"/>
+ <parameter type="__m128i" varname="b" etype="SI16"/>
+ <description>Compare packed signed 16-bit integers in "a" and "b" for equality, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ k[j] := ( a[i+15:i] == b[i+15:i] ) ? 1 : 0
+ENDFOR
+k[MAX:8] := 0
+ </operation>
+ <instruction name="VPCMPW" form="k, xmm, xmm" xed="VPCMPW_MASKmskw_MASKmskw_XMMi16_XMMi16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cmpge_epi16_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI16"/>
+ <parameter type="__m128i" varname="b" etype="SI16"/>
+ <description>Compare packed signed 16-bit integers in "a" and "b" for greater-than-or-equal, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ k[j] := ( a[i+15:i] &gt;= b[i+15:i] ) ? 1 : 0
+ENDFOR
+k[MAX:8] := 0
+ </operation>
+ <instruction name="VPCMPW" form="k, xmm, xmm" xed="VPCMPW_MASKmskw_MASKmskw_XMMi16_XMMi16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cmpgt_epi16_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI16"/>
+ <parameter type="__m128i" varname="b" etype="SI16"/>
+ <description>Compare packed signed 16-bit integers in "a" and "b" for greater-than, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ k[j] := ( a[i+15:i] &gt; b[i+15:i] ) ? 1 : 0
+ENDFOR
+k[MAX:8] := 0
+ </operation>
+ <instruction name="VPCMPW" form="k, xmm, xmm" xed="VPCMPW_MASKmskw_MASKmskw_XMMi16_XMMi16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cmple_epi16_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI16"/>
+ <parameter type="__m128i" varname="b" etype="SI16"/>
+ <description>Compare packed signed 16-bit integers in "a" and "b" for less-than-or-equal, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ k[j] := ( a[i+15:i] &lt;= b[i+15:i] ) ? 1 : 0
+ENDFOR
+k[MAX:8] := 0
+ </operation>
+ <instruction name="VPCMPW" form="k, xmm, xmm" xed="VPCMPW_MASKmskw_MASKmskw_XMMi16_XMMi16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cmplt_epi16_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI16"/>
+ <parameter type="__m128i" varname="b" etype="SI16"/>
+ <description>Compare packed signed 16-bit integers in "a" and "b" for less-than, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ k[j] := ( a[i+15:i] &lt; b[i+15:i] ) ? 1 : 0
+ENDFOR
+k[MAX:8] := 0
+ </operation>
+ <instruction name="VPCMPW" form="k, xmm, xmm" xed="VPCMPW_MASKmskw_MASKmskw_XMMi16_XMMi16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cmpneq_epi16_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI16"/>
+ <parameter type="__m128i" varname="b" etype="SI16"/>
+ <description>Compare packed signed 16-bit integers in "a" and "b" for not-equal, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ k[j] := ( a[i+15:i] != b[i+15:i] ) ? 1 : 0
+ENDFOR
+k[MAX:8] := 0
+ </operation>
+ <instruction name="VPCMPW" form="k, xmm, xmm" xed="VPCMPW_MASKmskw_MASKmskw_XMMi16_XMMi16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cmp_epi16_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__mmask8" varname="k1" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI16"/>
+ <parameter type="__m128i" varname="b" etype="SI16"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immtype="_MM_CMPINT"/>
+ <description>Compare packed signed 16-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>CASE (imm8[2:0]) OF
+0: OP := _MM_CMPINT_EQ
+1: OP := _MM_CMPINT_LT
+2: OP := _MM_CMPINT_LE
+3: OP := _MM_CMPINT_FALSE
+4: OP := _MM_CMPINT_NE
+5: OP := _MM_CMPINT_NLT
+6: OP := _MM_CMPINT_NLE
+7: OP := _MM_CMPINT_TRUE
+ESAC
+FOR j := 0 to 7
+ i := j*16
+ IF k1[j]
+ k[j] := ( a[i+15:i] OP b[i+15:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:8] := 0
+ </operation>
+ <instruction name="VPCMPW" form="k {k}, xmm, xmm, imm8" xed="VPCMPW_MASKmskw_MASKmskw_XMMi16_XMMi16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cmpeq_epi16_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__mmask8" varname="k1" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI16"/>
+ <parameter type="__m128i" varname="b" etype="SI16"/>
+ <description>Compare packed signed 16-bit integers in "a" and "b" for equality, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ IF k1[j]
+ k[j] := ( a[i+15:i] == b[i+15:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:8] := 0
+ </operation>
+ <instruction name="VPCMPW" form="k {k}, xmm, xmm" xed="VPCMPW_MASKmskw_MASKmskw_XMMi16_XMMi16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cmpge_epi16_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__mmask8" varname="k1" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI16"/>
+ <parameter type="__m128i" varname="b" etype="SI16"/>
+ <description>Compare packed signed 16-bit integers in "a" and "b" for greater-than-or-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ IF k1[j]
+ k[j] := ( a[i+15:i] &gt;= b[i+15:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:8] := 0
+ </operation>
+ <instruction name="VPCMPW" form="k {k}, xmm, xmm" xed="VPCMPW_MASKmskw_MASKmskw_XMMi16_XMMi16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cmpgt_epi16_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__mmask8" varname="k1" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI16"/>
+ <parameter type="__m128i" varname="b" etype="SI16"/>
+ <description>Compare packed signed 16-bit integers in "a" and "b" for greater-than, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ IF k1[j]
+ k[j] := ( a[i+15:i] &gt; b[i+15:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:8] := 0
+ </operation>
+ <instruction name="VPCMPW" form="k {k}, xmm, xmm" xed="VPCMPW_MASKmskw_MASKmskw_XMMi16_XMMi16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cmple_epi16_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__mmask8" varname="k1" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI16"/>
+ <parameter type="__m128i" varname="b" etype="SI16"/>
+ <description>Compare packed signed 16-bit integers in "a" and "b" for less-than-or-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ IF k1[j]
+ k[j] := ( a[i+15:i] &lt;= b[i+15:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:8] := 0
+ </operation>
+ <instruction name="VPCMPW" form="k {k}, xmm, xmm" xed="VPCMPW_MASKmskw_MASKmskw_XMMi16_XMMi16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cmplt_epi16_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__mmask8" varname="k1" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI16"/>
+ <parameter type="__m128i" varname="b" etype="SI16"/>
+ <description>Compare packed signed 16-bit integers in "a" and "b" for less-than, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ IF k1[j]
+ k[j] := ( a[i+15:i] &lt; b[i+15:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:8] := 0
+ </operation>
+ <instruction name="VPCMPW" form="k {k}, xmm, xmm" xed="VPCMPW_MASKmskw_MASKmskw_XMMi16_XMMi16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cmpneq_epi16_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__mmask8" varname="k1" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI16"/>
+ <parameter type="__m128i" varname="b" etype="SI16"/>
+ <description>Compare packed signed 16-bit integers in "a" and "b" for not-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ IF k1[j]
+ k[j] := ( a[i+15:i] != b[i+15:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:8] := 0
+ </operation>
+ <instruction name="VPCMPW" form="k {k}, xmm, xmm" xed="VPCMPW_MASKmskw_MASKmskw_XMMi16_XMMi16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask2_permutex2var_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__m256i" varname="a" etype="UI16"/>
+ <parameter type="__m256i" varname="idx" etype="UI16"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="b" etype="UI16"/>
+ <description>Shuffle 16-bit integers in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "idx" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ IF k[j]
+ off := 16*idx[i+3:i]
+ dst[i+15:i] := idx[i+4] ? b[off+15:off] : a[off+15:off]
+ ELSE
+ dst[i+15:i] := idx[i+15:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPERMI2W" form="ymm {k}, ymm, ymm" xed="VPERMI2W_YMMu16_MASKmskw_YMMu16_YMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_permutex2var_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__m256i" varname="a" etype="UI16"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="idx" etype="UI16"/>
+ <parameter type="__m256i" varname="b" etype="UI16"/>
+ <description>Shuffle 16-bit integers in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ IF k[j]
+ off := 16*idx[i+3:i]
+ dst[i+15:i] := idx[i+4] ? b[off+15:off] : a[off+15:off]
+ ELSE
+ dst[i+15:i] := a[i+15:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPERMT2W" form="ymm {k}, ymm, ymm" xed="VPERMT2W_YMMu16_MASKmskw_YMMu16_YMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_permutex2var_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI16"/>
+ <parameter type="__m256i" varname="idx" etype="UI16"/>
+ <parameter type="__m256i" varname="b" etype="UI16"/>
+ <description>Shuffle 16-bit integers in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ IF k[j]
+ off := 16*idx[i+3:i]
+ dst[i+15:i] := idx[i+4] ? b[off+15:off] : a[off+15:off]
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPERMI2W" form="ymm {z}, ymm, ymm" xed="VPERMI2W_YMMu16_MASKmskw_YMMu16_YMMu16_AVX512"/>
+ <instruction name="VPERMT2W" form="ymm {z}, ymm, ymm" xed="VPERMT2W_YMMu16_MASKmskw_YMMu16_YMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_permutex2var_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__m256i" varname="a" etype="UI16"/>
+ <parameter type="__m256i" varname="idx" etype="UI16"/>
+ <parameter type="__m256i" varname="b" etype="UI16"/>
+ <description>Shuffle 16-bit integers in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ off := 16*idx[i+3:i]
+ dst[i+15:i] := idx[i+4] ? b[off+15:off] : a[off+15:off]
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPERMI2W" form="ymm, ymm, ymm" xed="VPERMI2W_YMMu16_MASKmskw_YMMu16_YMMu16_AVX512"/>
+ <instruction name="VPERMT2W" form="ymm, ymm, ymm" xed="VPERMT2W_YMMu16_MASKmskw_YMMu16_YMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask2_permutex2var_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__m512i" varname="a" etype="UI16"/>
+ <parameter type="__m512i" varname="idx" etype="UI16"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="b" etype="UI16"/>
+ <description>Shuffle 16-bit integers in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "idx" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ IF k[j]
+ off := 16*idx[i+4:i]
+ dst[i+15:i] := idx[i+5] ? b[off+15:off] : a[off+15:off]
+ ELSE
+ dst[i+15:i] := idx[i+15:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPERMI2W" form="zmm {k}, zmm, zmm" xed="VPERMI2W_ZMMu16_MASKmskw_ZMMu16_ZMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_permutex2var_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__m512i" varname="a" etype="UI16"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="idx" etype="UI16"/>
+ <parameter type="__m512i" varname="b" etype="UI16"/>
+ <description>Shuffle 16-bit integers in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ IF k[j]
+ off := 16*idx[i+4:i]
+ dst[i+15:i] := idx[i+5] ? b[off+15:off] : a[off+15:off]
+ ELSE
+ dst[i+15:i] := a[i+15:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPERMT2W" form="zmm {k}, zmm, zmm" xed="VPERMT2W_ZMMu16_MASKmskw_ZMMu16_ZMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_permutex2var_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI16"/>
+ <parameter type="__m512i" varname="idx" etype="UI16"/>
+ <parameter type="__m512i" varname="b" etype="UI16"/>
+ <description>Shuffle 16-bit integers in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ IF k[j]
+ off := 16*idx[i+4:i]
+ dst[i+15:i] := idx[i+5] ? b[off+15:off] : a[off+15:off]
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPERMI2W" form="zmm {z}, zmm, zmm" xed="VPERMI2W_ZMMu16_MASKmskw_ZMMu16_ZMMu16_AVX512"/>
+ <instruction name="VPERMT2W" form="zmm {z}, zmm, zmm" xed="VPERMT2W_ZMMu16_MASKmskw_ZMMu16_ZMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_permutex2var_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__m512i" varname="a" etype="UI16"/>
+ <parameter type="__m512i" varname="idx" etype="UI16"/>
+ <parameter type="__m512i" varname="b" etype="UI16"/>
+ <description>Shuffle 16-bit integers in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ off := 16*idx[i+4:i]
+ dst[i+15:i] := idx[i+5] ? b[off+15:off] : a[off+15:off]
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPERMI2W" form="zmm, zmm, zmm" xed="VPERMI2W_ZMMu16_MASKmskw_ZMMu16_ZMMu16_AVX512"/>
+ <instruction name="VPERMT2W" form="zmm, zmm, zmm" xed="VPERMT2W_ZMMu16_MASKmskw_ZMMu16_ZMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask2_permutex2var_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <parameter type="__m128i" varname="idx" etype="UI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="b" etype="UI16"/>
+ <description>Shuffle 16-bit integers in "a" and "b" using the corresponding selector and index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "idx" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ IF k[j]
+ off := 16*idx[i+2:i]
+ dst[i+15:i] := idx[i+3] ? b[off+15:off] : a[off+15:off]
+ ELSE
+ dst[i+15:i] := idx[i+15:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPERMI2W" form="xmm {k}, xmm, xmm" xed="VPERMI2W_XMMu16_MASKmskw_XMMu16_XMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_permutex2var_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="idx" etype="UI16"/>
+ <parameter type="__m128i" varname="b" etype="UI16"/>
+ <description>Shuffle 16-bit integers in "a" and "b" using the corresponding selector and index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ IF k[j]
+ off := 16*idx[i+2:i]
+ dst[i+15:i] := idx[i+3] ? b[off+15:off] : a[off+15:off]
+ ELSE
+ dst[i+15:i] := a[i+15:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPERMT2W" form="xmm {k}, xmm, xmm" xed="VPERMT2W_XMMu16_MASKmskw_XMMu16_XMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_permutex2var_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <parameter type="__m128i" varname="idx" etype="UI16"/>
+ <parameter type="__m128i" varname="b" etype="UI16"/>
+ <description>Shuffle 16-bit integers in "a" and "b" using the corresponding selector and index in "idx", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ IF k[j]
+ off := 16*idx[i+2:i]
+ dst[i+15:i] := idx[i+3] ? b[off+15:off] : a[off+15:off]
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPERMI2W" form="xmm {z}, xmm, xmm" xed="VPERMI2W_XMMu16_MASKmskw_XMMu16_XMMu16_AVX512"/>
+ <instruction name="VPERMT2W" form="xmm {z}, xmm, xmm" xed="VPERMT2W_XMMu16_MASKmskw_XMMu16_XMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_permutex2var_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <parameter type="__m128i" varname="idx" etype="UI16"/>
+ <parameter type="__m128i" varname="b" etype="UI16"/>
+ <description>Shuffle 16-bit integers in "a" and "b" using the corresponding selector and index in "idx", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ off := 16*idx[i+2:i]
+ dst[i+15:i] := idx[i+3] ? b[off+15:off] : a[off+15:off]
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPERMI2W" form="xmm, xmm, xmm" xed="VPERMI2W_XMMu16_MASKmskw_XMMu16_XMMu16_AVX512"/>
+ <instruction name="VPERMT2W" form="xmm, xmm, xmm" xed="VPERMT2W_XMMu16_MASKmskw_XMMu16_XMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_permutexvar_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__m256i" varname="src" etype="UI16"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="idx" etype="UI16"/>
+ <parameter type="__m256i" varname="a" etype="UI16"/>
+ <description>Shuffle 16-bit integers in "a" across lanes using the corresponding index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ id := idx[i+3:i]*16
+ IF k[j]
+ dst[i+15:i] := a[id+15:id]
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPERMW" form="ymm {k}, ymm, ymm" xed="VPERMW_YMMu16_MASKmskw_YMMu16_YMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_permutexvar_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="idx" etype="UI16"/>
+ <parameter type="__m256i" varname="a" etype="UI16"/>
+ <description>Shuffle 16-bit integers in "a" across lanes using the corresponding index in "idx", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ id := idx[i+3:i]*16
+ IF k[j]
+ dst[i+15:i] := a[id+15:id]
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPERMW" form="ymm {z}, ymm, ymm" xed="VPERMW_YMMu16_MASKmskw_YMMu16_YMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_permutexvar_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__m256i" varname="idx" etype="UI16"/>
+ <parameter type="__m256i" varname="a" etype="UI16"/>
+ <description>Shuffle 16-bit integers in "a" across lanes using the corresponding index in "idx", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ id := idx[i+3:i]*16
+ dst[i+15:i] := a[id+15:id]
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPERMW" form="ymm, ymm, ymm" xed="VPERMW_YMMu16_MASKmskw_YMMu16_YMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_permutexvar_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__m512i" varname="src" etype="UI16"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="idx" etype="UI16"/>
+ <parameter type="__m512i" varname="a" etype="UI16"/>
+ <description>Shuffle 16-bit integers in "a" across lanes using the corresponding index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ id := idx[i+4:i]*16
+ IF k[j]
+ dst[i+15:i] := a[id+15:id]
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPERMW" form="zmm {k}, zmm, zmm" xed="VPERMW_ZMMu16_MASKmskw_ZMMu16_ZMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_permutexvar_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="idx" etype="UI16"/>
+ <parameter type="__m512i" varname="a" etype="UI16"/>
+ <description>Shuffle 16-bit integers in "a" across lanes using the corresponding index in "idx", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ id := idx[i+4:i]*16
+ IF k[j]
+ dst[i+15:i] := a[id+15:id]
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPERMW" form="zmm {z}, zmm, zmm" xed="VPERMW_ZMMu16_MASKmskw_ZMMu16_ZMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_permutexvar_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__m512i" varname="idx" etype="UI16"/>
+ <parameter type="__m512i" varname="a" etype="UI16"/>
+ <description>Shuffle 16-bit integers in "a" across lanes using the corresponding index in "idx", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ id := idx[i+4:i]*16
+ dst[i+15:i] := a[id+15:id]
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPERMW" form="zmm, zmm, zmm" xed="VPERMW_ZMMu16_MASKmskw_ZMMu16_ZMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_permutexvar_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__m128i" varname="src" etype="UI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="idx" etype="UI16"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <description>Shuffle 16-bit integers in "a" using the corresponding index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ id := idx[i+2:i]*16
+ IF k[j]
+ dst[i+15:i] := a[id+15:id]
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPERMW" form="xmm {k}, xmm, xmm" xed="VPERMW_XMMu16_MASKmskw_XMMu16_XMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_permutexvar_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="idx" etype="UI16"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <description>Shuffle 16-bit integers in "a" using the corresponding index in "idx", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ id := idx[i+2:i]*16
+ IF k[j]
+ dst[i+15:i] := a[id+15:id]
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPERMW" form="xmm {z}, xmm, xmm" xed="VPERMW_XMMu16_MASKmskw_XMMu16_XMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_permutexvar_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__m128i" varname="idx" etype="UI16"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <description>Shuffle 16-bit integers in "a" using the corresponding index in "idx", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ id := idx[i+2:i]*16
+ dst[i+15:i] := a[id+15:id]
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPERMW" form="xmm, xmm, xmm" xed="VPERMW_XMMu16_MASKmskw_XMMu16_XMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_maddubs_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="SI16"/>
+ <parameter type="__m256i" varname="src" etype="SI16"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI8"/>
+ <parameter type="__m256i" varname="b" etype="SI8"/>
+ <description>Multiply packed unsigned 8-bit integers in "a" by packed signed 8-bit integers in "b", producing intermediate signed 16-bit integers. Horizontally add adjacent pairs of intermediate signed 16-bit integers, and pack the saturated results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := Saturate16( a[i+15:i+8]*b[i+15:i+8] + a[i+7:i]*b[i+7:i] )
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMADDUBSW" form="ymm {k}, ymm, ymm" xed="VPMADDUBSW_YMMi16_MASKmskw_YMMi16_YMMi16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_maddubs_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="SI16"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI8"/>
+ <parameter type="__m256i" varname="b" etype="SI8"/>
+ <description>Multiply packed unsigned 8-bit integers in "a" by packed signed 8-bit integers in "b", producing intermediate signed 16-bit integers. Horizontally add adjacent pairs of intermediate signed 16-bit integers, and pack the saturated results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := Saturate16( a[i+15:i+8]*b[i+15:i+8] + a[i+7:i]*b[i+7:i] )
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMADDUBSW" form="ymm {z}, ymm, ymm" xed="VPMADDUBSW_YMMi16_MASKmskw_YMMi16_YMMi16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maddubs_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="SI16"/>
+ <parameter type="__m512i" varname="a" etype="UI8"/>
+ <parameter type="__m512i" varname="b" etype="SI8"/>
+ <description>Vertically multiply each unsigned 8-bit integer from "a" with the corresponding signed 8-bit integer from "b", producing intermediate signed 16-bit integers. Horizontally add adjacent pairs of intermediate signed 16-bit integers, and pack the saturated results in "dst".</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ dst[i+15:i] := Saturate16( a[i+15:i+8]*b[i+15:i+8] + a[i+7:i]*b[i+7:i] )
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMADDUBSW" form="zmm, zmm, zmm" xed="VPMADDUBSW_ZMMi16_MASKmskw_ZMMi16_ZMMi16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_maddubs_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="SI16"/>
+ <parameter type="__m512i" varname="src" etype="SI16"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI8"/>
+ <parameter type="__m512i" varname="b" etype="SI8"/>
+ <description>Multiply packed unsigned 8-bit integers in "a" by packed signed 8-bit integers in "b", producing intermediate signed 16-bit integers. Horizontally add adjacent pairs of intermediate signed 16-bit integers, and pack the saturated results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := Saturate16( a[i+15:i+8]*b[i+15:i+8] + a[i+7:i]*b[i+7:i] )
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMADDUBSW" form="zmm {k}, zmm, zmm" xed="VPMADDUBSW_ZMMi16_MASKmskw_ZMMi16_ZMMi16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_maddubs_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="SI16"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI8"/>
+ <parameter type="__m512i" varname="b" etype="SI8"/>
+ <description>Multiply packed unsigned 8-bit integers in "a" by packed signed 8-bit integers in "b", producing intermediate signed 16-bit integers. Horizontally add adjacent pairs of intermediate signed 16-bit integers, and pack the saturated results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := Saturate16( a[i+15:i+8]*b[i+15:i+8] + a[i+7:i]*b[i+7:i] )
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMADDUBSW" form="zmm {z}, zmm, zmm" xed="VPMADDUBSW_ZMMi16_MASKmskw_ZMMi16_ZMMi16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_maddubs_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="SI16"/>
+ <parameter type="__m128i" varname="src" etype="SI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
+ <parameter type="__m128i" varname="b" etype="SI8"/>
+ <description>Multiply packed unsigned 8-bit integers in "a" by packed signed 8-bit integers in "b", producing intermediate signed 16-bit integers. Horizontally add adjacent pairs of intermediate signed 16-bit integers, and pack the saturated results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := Saturate16( a[i+15:i+8]*b[i+15:i+8] + a[i+7:i]*b[i+7:i] )
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMADDUBSW" form="xmm {k}, xmm, xmm" xed="VPMADDUBSW_XMMi16_MASKmskw_XMMi16_XMMi16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_maddubs_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="SI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
+ <parameter type="__m128i" varname="b" etype="SI8"/>
+ <description>Multiply packed unsigned 8-bit integers in "a" by packed signed 8-bit integers in "b", producing intermediate signed 16-bit integers. Horizontally add adjacent pairs of intermediate signed 16-bit integers, and pack the saturated results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := Saturate16( a[i+15:i+8]*b[i+15:i+8] + a[i+7:i]*b[i+7:i] )
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMADDUBSW" form="xmm {z}, xmm, xmm" xed="VPMADDUBSW_XMMi16_MASKmskw_XMMi16_XMMi16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_madd_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="SI32"/>
+ <parameter type="__m256i" varname="src" etype="SI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI16"/>
+ <parameter type="__m256i" varname="b" etype="SI16"/>
+ <description>Multiply packed signed 16-bit integers in "a" and "b", producing intermediate signed 32-bit integers. Horizontally add adjacent pairs of intermediate 32-bit integers, and pack the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := SignExtend32(a[i+31:i+16]*b[i+31:i+16]) + SignExtend32(a[i+15:i]*b[i+15:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMADDWD" form="ymm {k}, ymm, ymm" xed="VPMADDWD_YMMi32_MASKmskw_YMMi16_YMMi16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_madd_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="SI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI16"/>
+ <parameter type="__m256i" varname="b" etype="SI16"/>
+ <description>Multiply packed signed 16-bit integers in "a" and "b", producing intermediate signed 32-bit integers. Horizontally add adjacent pairs of intermediate 32-bit integers, and pack the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := SignExtend32(a[i+31:i+16]*b[i+31:i+16]) + SignExtend32(a[i+15:i]*b[i+15:i])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMADDWD" form="ymm {z}, ymm, ymm" xed="VPMADDWD_YMMi32_MASKmskw_YMMi16_YMMi16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_madd_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="SI32"/>
+ <parameter type="__m512i" varname="a" etype="SI16"/>
+ <parameter type="__m512i" varname="b" etype="SI16"/>
+ <description>Multiply packed signed 16-bit integers in "a" and "b", producing intermediate signed 32-bit integers. Horizontally add adjacent pairs of intermediate 32-bit integers, and pack the results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := SignExtend32(a[i+31:i+16]*b[i+31:i+16]) + SignExtend32(a[i+15:i]*b[i+15:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMADDWD" form="zmm, zmm, zmm" xed="VPMADDWD_ZMMi32_MASKmskw_ZMMi16_ZMMi16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_madd_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="SI32"/>
+ <parameter type="__m512i" varname="src" etype="SI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI16"/>
+ <parameter type="__m512i" varname="b" etype="SI16"/>
+ <description>Multiply packed signed 16-bit integers in "a" and "b", producing intermediate signed 32-bit integers. Horizontally add adjacent pairs of intermediate 32-bit integers, and pack the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := SignExtend32(a[i+31:i+16]*b[i+31:i+16]) + SignExtend32(a[i+15:i]*b[i+15:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMADDWD" form="zmm {k}, zmm, zmm" xed="VPMADDWD_ZMMi32_MASKmskw_ZMMi16_ZMMi16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_madd_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="SI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI16"/>
+ <parameter type="__m512i" varname="b" etype="SI16"/>
+ <description>Multiply packed signed 16-bit integers in "a" and "b", producing intermediate signed 32-bit integers. Horizontally add adjacent pairs of intermediate 32-bit integers, and pack the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := SignExtend32(a[i+31:i+16]*b[i+31:i+16]) + SignExtend32(a[i+15:i]*b[i+15:i])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMADDWD" form="zmm {z}, zmm, zmm" xed="VPMADDWD_ZMMi32_MASKmskw_ZMMi16_ZMMi16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_madd_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="SI32"/>
+ <parameter type="__m128i" varname="src" etype="SI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI16"/>
+ <parameter type="__m128i" varname="b" etype="SI16"/>
+ <description>Multiply packed signed 16-bit integers in "a" and "b", producing intermediate signed 32-bit integers. Horizontally add adjacent pairs of intermediate 32-bit integers, and pack the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := SignExtend32(a[i+31:i+16]*b[i+31:i+16]) + SignExtend32(a[i+15:i]*b[i+15:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMADDWD" form="xmm {k}, xmm, xmm" xed="VPMADDWD_XMMi32_MASKmskw_XMMi16_XMMi16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_madd_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="SI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI16"/>
+ <parameter type="__m128i" varname="b" etype="SI16"/>
+ <description>Multiply packed signed 16-bit integers in "a" and "b", producing intermediate signed 32-bit integers. Horizontally add adjacent pairs of intermediate 32-bit integers, and pack the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := SignExtend32(a[i+31:i+16]*b[i+31:i+16]) + SignExtend32(a[i+15:i]*b[i+15:i])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMADDWD" form="xmm {z}, xmm, xmm" xed="VPMADDWD_XMMi32_MASKmskw_XMMi16_XMMi16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_max_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI8"/>
+ <parameter type="__m256i" varname="src" etype="UI8"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI8"/>
+ <parameter type="__m256i" varname="b" etype="SI8"/>
+ <description>Compare packed signed 8-bit integers in "a" and "b", and store packed maximum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := MAX(a[i+7:i], b[i+7:i])
+ ELSE
+ dst[i+7:i] := src[i+7:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMAXSB" form="ymm {k}, ymm, ymm" xed="VPMAXSB_YMMi8_MASKmskw_YMMi8_YMMi8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_max_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI8"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI8"/>
+ <parameter type="__m256i" varname="b" etype="SI8"/>
+ <description>Compare packed signed 8-bit integers in "a" and "b", and store packed maximum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := MAX(a[i+7:i], b[i+7:i])
+ ELSE
+ dst[i+7:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMAXSB" form="ymm {z}, ymm, ymm" xed="VPMAXSB_YMMi8_MASKmskw_YMMi8_YMMi8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_max_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI8"/>
+ <parameter type="__m512i" varname="src" etype="UI8"/>
+ <parameter type="__mmask64" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI8"/>
+ <parameter type="__m512i" varname="b" etype="SI8"/>
+ <description>Compare packed signed 8-bit integers in "a" and "b", and store packed maximum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 63
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := MAX(a[i+7:i], b[i+7:i])
+ ELSE
+ dst[i+7:i] := src[i+7:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMAXSB" form="zmm {k}, zmm, zmm" xed="VPMAXSB_ZMMi8_MASKmskw_ZMMi8_ZMMi8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_max_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI8"/>
+ <parameter type="__mmask64" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI8"/>
+ <parameter type="__m512i" varname="b" etype="SI8"/>
+ <description>Compare packed signed 8-bit integers in "a" and "b", and store packed maximum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 63
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := MAX(a[i+7:i], b[i+7:i])
+ ELSE
+ dst[i+7:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMAXSB" form="zmm {z}, zmm, zmm" xed="VPMAXSB_ZMMi8_MASKmskw_ZMMi8_ZMMi8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_max_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI8"/>
+ <parameter type="__m512i" varname="a" etype="SI8"/>
+ <parameter type="__m512i" varname="b" etype="SI8"/>
+ <description>Compare packed signed 8-bit integers in "a" and "b", and store packed maximum values in "dst".</description>
+ <operation>
+FOR j := 0 to 63
+ i := j*8
+ dst[i+7:i] := MAX(a[i+7:i], b[i+7:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMAXSB" form="zmm, zmm, zmm" xed="VPMAXSB_ZMMi8_MASKmskw_ZMMi8_ZMMi8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_max_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__m128i" varname="src" etype="UI8"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI8"/>
+ <parameter type="__m128i" varname="b" etype="SI8"/>
+ <description>Compare packed signed 8-bit integers in "a" and "b", and store packed maximum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := MAX(a[i+7:i], b[i+7:i])
+ ELSE
+ dst[i+7:i] := src[i+7:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMAXSB" form="xmm {k}, xmm, xmm" xed="VPMAXSB_XMMi8_MASKmskw_XMMi8_XMMi8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_max_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI8"/>
+ <parameter type="__m128i" varname="b" etype="SI8"/>
+ <description>Compare packed signed 8-bit integers in "a" and "b", and store packed maximum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := MAX(a[i+7:i], b[i+7:i])
+ ELSE
+ dst[i+7:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMAXSB" form="xmm {z}, xmm, xmm" xed="VPMAXSB_XMMi8_MASKmskw_XMMi8_XMMi8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_max_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__m256i" varname="src" etype="UI16"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI16"/>
+ <parameter type="__m256i" varname="b" etype="SI16"/>
+ <description>Compare packed signed 16-bit integers in "a" and "b", and store packed maximum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := MAX(a[i+15:i], b[i+15:i])
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMAXSW" form="ymm {k}, ymm, ymm" xed="VPMAXSW_YMMi16_MASKmskw_YMMi16_YMMi16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_max_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI16"/>
+ <parameter type="__m256i" varname="b" etype="SI16"/>
+ <description>Compare packed signed 16-bit integers in "a" and "b", and store packed maximum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := MAX(a[i+15:i], b[i+15:i])
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMAXSW" form="ymm {z}, ymm, ymm" xed="VPMAXSW_YMMi16_MASKmskw_YMMi16_YMMi16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_max_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__m512i" varname="src" etype="UI16"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI16"/>
+ <parameter type="__m512i" varname="b" etype="SI16"/>
+ <description>Compare packed signed 16-bit integers in "a" and "b", and store packed maximum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := MAX(a[i+15:i], b[i+15:i])
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMAXSW" form="zmm {k}, zmm, zmm" xed="VPMAXSW_ZMMi16_MASKmskw_ZMMi16_ZMMi16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_max_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI16"/>
+ <parameter type="__m512i" varname="b" etype="SI16"/>
+ <description>Compare packed signed 16-bit integers in "a" and "b", and store packed maximum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := MAX(a[i+15:i], b[i+15:i])
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMAXSW" form="zmm {z}, zmm, zmm" xed="VPMAXSW_ZMMi16_MASKmskw_ZMMi16_ZMMi16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_max_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__m512i" varname="a" etype="SI16"/>
+ <parameter type="__m512i" varname="b" etype="SI16"/>
+ <description>Compare packed signed 16-bit integers in "a" and "b", and store packed maximum values in "dst".</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ dst[i+15:i] := MAX(a[i+15:i], b[i+15:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMAXSW" form="zmm, zmm, zmm" xed="VPMAXSW_ZMMi16_MASKmskw_ZMMi16_ZMMi16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_max_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__m128i" varname="src" etype="UI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI16"/>
+ <parameter type="__m128i" varname="b" etype="SI16"/>
+ <description>Compare packed signed 16-bit integers in "a" and "b", and store packed maximum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := MAX(a[i+15:i], b[i+15:i])
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMAXSW" form="xmm {k}, xmm, xmm" xed="VPMAXSW_XMMi16_MASKmskw_XMMi16_XMMi16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_max_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI16"/>
+ <parameter type="__m128i" varname="b" etype="SI16"/>
+ <description>Compare packed signed 16-bit integers in "a" and "b", and store packed maximum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := MAX(a[i+15:i], b[i+15:i])
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMAXSW" form="xmm {z}, xmm, xmm" xed="VPMAXSW_XMMi16_MASKmskw_XMMi16_XMMi16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_max_epu8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI8"/>
+ <parameter type="__m256i" varname="src" etype="UI8"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI8"/>
+ <parameter type="__m256i" varname="b" etype="UI8"/>
+ <description>Compare packed unsigned 8-bit integers in "a" and "b", and store packed maximum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := MAX(a[i+7:i], b[i+7:i])
+ ELSE
+ dst[i+7:i] := src[i+7:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMAXUB" form="ymm {k}, ymm, ymm" xed="VPMAXUB_YMMu8_MASKmskw_YMMu8_YMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_max_epu8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI8"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI8"/>
+ <parameter type="__m256i" varname="b" etype="UI8"/>
+ <description>Compare packed unsigned 8-bit integers in "a" and "b", and store packed maximum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := MAX(a[i+7:i], b[i+7:i])
+ ELSE
+ dst[i+7:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMAXUB" form="ymm {z}, ymm, ymm" xed="VPMAXUB_YMMu8_MASKmskw_YMMu8_YMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_max_epu8">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI8"/>
+ <parameter type="__m512i" varname="src" etype="UI8"/>
+ <parameter type="__mmask64" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI8"/>
+ <parameter type="__m512i" varname="b" etype="UI8"/>
+ <description>Compare packed unsigned 8-bit integers in "a" and "b", and store packed maximum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 63
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := MAX(a[i+7:i], b[i+7:i])
+ ELSE
+ dst[i+7:i] := src[i+7:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMAXUB" form="zmm {k}, zmm, zmm" xed="VPMAXUB_ZMMu8_MASKmskw_ZMMu8_ZMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_max_epu8">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI8"/>
+ <parameter type="__mmask64" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI8"/>
+ <parameter type="__m512i" varname="b" etype="UI8"/>
+ <description>Compare packed unsigned 8-bit integers in "a" and "b", and store packed maximum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 63
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := MAX(a[i+7:i], b[i+7:i])
+ ELSE
+ dst[i+7:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMAXUB" form="zmm {z}, zmm, zmm" xed="VPMAXUB_ZMMu8_MASKmskw_ZMMu8_ZMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_max_epu8">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI8"/>
+ <parameter type="__m512i" varname="a" etype="UI8"/>
+ <parameter type="__m512i" varname="b" etype="UI8"/>
+ <description>Compare packed unsigned 8-bit integers in "a" and "b", and store packed maximum values in "dst".</description>
+ <operation>
+FOR j := 0 to 63
+ i := j*8
+ dst[i+7:i] := MAX(a[i+7:i], b[i+7:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMAXUB" form="zmm, zmm, zmm" xed="VPMAXUB_ZMMu8_MASKmskw_ZMMu8_ZMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_max_epu8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__m128i" varname="src" etype="UI8"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
+ <parameter type="__m128i" varname="b" etype="UI8"/>
+ <description>Compare packed unsigned 8-bit integers in "a" and "b", and store packed maximum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := MAX(a[i+7:i], b[i+7:i])
+ ELSE
+ dst[i+7:i] := src[i+7:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMAXUB" form="xmm {k}, xmm, xmm" xed="VPMAXUB_XMMu8_MASKmskw_XMMu8_XMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_max_epu8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
+ <parameter type="__m128i" varname="b" etype="UI8"/>
+ <description>Compare packed unsigned 8-bit integers in "a" and "b", and store packed maximum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := MAX(a[i+7:i], b[i+7:i])
+ ELSE
+ dst[i+7:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMAXUB" form="xmm {z}, xmm, xmm" xed="VPMAXUB_XMMu8_MASKmskw_XMMu8_XMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_max_epu16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__m256i" varname="src" etype="UI16"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI16"/>
+ <parameter type="__m256i" varname="b" etype="UI16"/>
+ <description>Compare packed unsigned 16-bit integers in "a" and "b", and store packed maximum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := MAX(a[i+15:i], b[i+15:i])
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMAXUW" form="ymm {k}, ymm, ymm" xed="VPMAXUW_YMMu16_MASKmskw_YMMu16_YMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_max_epu16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI16"/>
+ <parameter type="__m256i" varname="b" etype="UI16"/>
+ <description>Compare packed unsigned 16-bit integers in "a" and "b", and store packed maximum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := MAX(a[i+15:i], b[i+15:i])
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMAXUW" form="ymm {z}, ymm, ymm" xed="VPMAXUW_YMMu16_MASKmskw_YMMu16_YMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_max_epu16">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__m512i" varname="src" etype="UI16"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI16"/>
+ <parameter type="__m512i" varname="b" etype="UI16"/>
+ <description>Compare packed unsigned 16-bit integers in "a" and "b", and store packed maximum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := MAX(a[i+15:i], b[i+15:i])
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMAXUW" form="zmm {k}, zmm, zmm" xed="VPMAXUW_ZMMu16_MASKmskw_ZMMu16_ZMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_max_epu16">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI16"/>
+ <parameter type="__m512i" varname="b" etype="UI16"/>
+ <description>Compare packed unsigned 16-bit integers in "a" and "b", and store packed maximum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := MAX(a[i+15:i], b[i+15:i])
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMAXUW" form="zmm {z}, zmm, zmm" xed="VPMAXUW_ZMMu16_MASKmskw_ZMMu16_ZMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_max_epu16">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__m512i" varname="a" etype="UI16"/>
+ <parameter type="__m512i" varname="b" etype="UI16"/>
+ <description>Compare packed unsigned 16-bit integers in "a" and "b", and store packed maximum values in "dst".</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ dst[i+15:i] := MAX(a[i+15:i], b[i+15:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMAXUW" form="zmm, zmm, zmm" xed="VPMAXUW_ZMMu16_MASKmskw_ZMMu16_ZMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_max_epu16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__m128i" varname="src" etype="UI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <parameter type="__m128i" varname="b" etype="UI16"/>
+ <description>Compare packed unsigned 16-bit integers in "a" and "b", and store packed maximum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := MAX(a[i+15:i], b[i+15:i])
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMAXUW" form="xmm {k}, xmm, xmm" xed="VPMAXUW_XMMu16_MASKmskw_XMMu16_XMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_max_epu16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <parameter type="__m128i" varname="b" etype="UI16"/>
+ <description>Compare packed unsigned 16-bit integers in "a" and "b", and store packed maximum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := MAX(a[i+15:i], b[i+15:i])
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMAXUW" form="xmm {z}, xmm, xmm" xed="VPMAXUW_XMMu16_MASKmskw_XMMu16_XMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_min_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI8"/>
+ <parameter type="__m256i" varname="src" etype="UI8"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI8"/>
+ <parameter type="__m256i" varname="b" etype="SI8"/>
+ <description>Compare packed signed 8-bit integers in "a" and "b", and store packed minimum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := MIN(a[i+7:i], b[i+7:i])
+ ELSE
+ dst[i+7:i] := src[i+7:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMINSB" form="ymm {k}, ymm, ymm" xed="VPMINSB_YMMi8_MASKmskw_YMMi8_YMMi8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_min_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI8"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI8"/>
+ <parameter type="__m256i" varname="b" etype="SI8"/>
+ <description>Compare packed signed 8-bit integers in "a" and "b", and store packed minimum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := MIN(a[i+7:i], b[i+7:i])
+ ELSE
+ dst[i+7:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMINSB" form="ymm {z}, ymm, ymm" xed="VPMINSB_YMMi8_MASKmskw_YMMi8_YMMi8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_min_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI8"/>
+ <parameter type="__m512i" varname="src" etype="UI8"/>
+ <parameter type="__mmask64" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI8"/>
+ <parameter type="__m512i" varname="b" etype="SI8"/>
+ <description>Compare packed signed 8-bit integers in "a" and "b", and store packed minimum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 63
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := MIN(a[i+7:i], b[i+7:i])
+ ELSE
+ dst[i+7:i] := src[i+7:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMINSB" form="zmm {k}, zmm, zmm" xed="VPMINSB_ZMMi8_MASKmskw_ZMMi8_ZMMi8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_min_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI8"/>
+ <parameter type="__mmask64" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI8"/>
+ <parameter type="__m512i" varname="b" etype="SI8"/>
+ <description>Compare packed signed 8-bit integers in "a" and "b", and store packed minimum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 63
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := MIN(a[i+7:i], b[i+7:i])
+ ELSE
+ dst[i+7:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMINSB" form="zmm {z}, zmm, zmm" xed="VPMINSB_ZMMi8_MASKmskw_ZMMi8_ZMMi8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_min_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI8"/>
+ <parameter type="__m512i" varname="a" etype="SI8"/>
+ <parameter type="__m512i" varname="b" etype="SI8"/>
+ <description>Compare packed signed 8-bit integers in "a" and "b", and store packed minimum values in "dst".</description>
+ <operation>
+FOR j := 0 to 63
+ i := j*8
+ dst[i+7:i] := MIN(a[i+7:i], b[i+7:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMINSB" form="zmm, zmm, zmm" xed="VPMINSB_ZMMi8_MASKmskw_ZMMi8_ZMMi8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_min_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__m128i" varname="src" etype="UI8"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI8"/>
+ <parameter type="__m128i" varname="b" etype="SI8"/>
+ <description>Compare packed signed 8-bit integers in "a" and "b", and store packed minimum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := MIN(a[i+7:i], b[i+7:i])
+ ELSE
+ dst[i+7:i] := src[i+7:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMINSB" form="xmm {k}, xmm, xmm" xed="VPMINSB_XMMi8_MASKmskw_XMMi8_XMMi8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_min_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI8"/>
+ <parameter type="__m128i" varname="b" etype="SI8"/>
+ <description>Compare packed signed 8-bit integers in "a" and "b", and store packed minimum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := MIN(a[i+7:i], b[i+7:i])
+ ELSE
+ dst[i+7:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMINSB" form="xmm {z}, xmm, xmm" xed="VPMINSB_XMMi8_MASKmskw_XMMi8_XMMi8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_min_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__m256i" varname="src" etype="UI16"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI16"/>
+ <parameter type="__m256i" varname="b" etype="SI16"/>
+ <description>Compare packed signed 16-bit integers in "a" and "b", and store packed minimum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := MIN(a[i+15:i], b[i+15:i])
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMINSW" form="ymm {k}, ymm, ymm" xed="VPMINSW_YMMi16_MASKmskw_YMMi16_YMMi16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_min_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI16"/>
+ <parameter type="__m256i" varname="b" etype="SI16"/>
+ <description>Compare packed signed 16-bit integers in "a" and "b", and store packed minimum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := MIN(a[i+15:i], b[i+15:i])
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMINSW" form="ymm {z}, ymm, ymm" xed="VPMINSW_YMMi16_MASKmskw_YMMi16_YMMi16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_min_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__m512i" varname="src" etype="UI16"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI16"/>
+ <parameter type="__m512i" varname="b" etype="SI16"/>
+ <description>Compare packed signed 16-bit integers in "a" and "b", and store packed minimum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := MIN(a[i+15:i], b[i+15:i])
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMINSW" form="zmm {k}, zmm, zmm" xed="VPMINSW_ZMMi16_MASKmskw_ZMMi16_ZMMi16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_min_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI16"/>
+ <parameter type="__m512i" varname="b" etype="SI16"/>
+ <description>Compare packed signed 16-bit integers in "a" and "b", and store packed minimum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := MIN(a[i+15:i], b[i+15:i])
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMINSW" form="zmm {z}, zmm, zmm" xed="VPMINSW_ZMMi16_MASKmskw_ZMMi16_ZMMi16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_min_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__m512i" varname="a" etype="SI16"/>
+ <parameter type="__m512i" varname="b" etype="SI16"/>
+ <description>Compare packed signed 16-bit integers in "a" and "b", and store packed minimum values in "dst".</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ dst[i+15:i] := MIN(a[i+15:i], b[i+15:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMINSW" form="zmm, zmm, zmm" xed="VPMINSW_ZMMi16_MASKmskw_ZMMi16_ZMMi16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_min_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__m128i" varname="src" etype="UI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI16"/>
+ <parameter type="__m128i" varname="b" etype="SI16"/>
+ <description>Compare packed signed 16-bit integers in "a" and "b", and store packed minimum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := MIN(a[i+15:i], b[i+15:i])
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMINSW" form="xmm {k}, xmm, xmm" xed="VPMINSW_XMMi16_MASKmskw_XMMi16_XMMi16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_min_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI16"/>
+ <parameter type="__m128i" varname="b" etype="SI16"/>
+ <description>Compare packed signed 16-bit integers in "a" and "b", and store packed minimum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := MIN(a[i+15:i], b[i+15:i])
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMINSW" form="xmm {z}, xmm, xmm" xed="VPMINSW_XMMi16_MASKmskw_XMMi16_XMMi16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_min_epu8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI8"/>
+ <parameter type="__m256i" varname="src" etype="UI8"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI8"/>
+ <parameter type="__m256i" varname="b" etype="UI8"/>
+ <description>Compare packed unsigned 8-bit integers in "a" and "b", and store packed minimum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := MIN(a[i+7:i], b[i+7:i])
+ ELSE
+ dst[i+7:i] := src[i+7:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMINUB" form="ymm {k}, ymm, ymm" xed="VPMINUB_YMMu8_MASKmskw_YMMu8_YMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_min_epu8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI8"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI8"/>
+ <parameter type="__m256i" varname="b" etype="UI8"/>
+ <description>Compare packed unsigned 8-bit integers in "a" and "b", and store packed minimum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := MIN(a[i+7:i], b[i+7:i])
+ ELSE
+ dst[i+7:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMINUB" form="ymm {z}, ymm, ymm" xed="VPMINUB_YMMu8_MASKmskw_YMMu8_YMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_min_epu8">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI8"/>
+ <parameter type="__m512i" varname="src" etype="UI8"/>
+ <parameter type="__mmask64" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI8"/>
+ <parameter type="__m512i" varname="b" etype="UI8"/>
+ <description>Compare packed unsigned 8-bit integers in "a" and "b", and store packed minimum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 63
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := MIN(a[i+7:i], b[i+7:i])
+ ELSE
+ dst[i+7:i] := src[i+7:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMINUB" form="zmm {k}, zmm, zmm" xed="VPMINUB_ZMMu8_MASKmskw_ZMMu8_ZMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_min_epu8">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI8"/>
+ <parameter type="__mmask64" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI8"/>
+ <parameter type="__m512i" varname="b" etype="UI8"/>
+ <description>Compare packed unsigned 8-bit integers in "a" and "b", and store packed minimum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 63
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := MIN(a[i+7:i], b[i+7:i])
+ ELSE
+ dst[i+7:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMINUB" form="zmm {z}, zmm, zmm" xed="VPMINUB_ZMMu8_MASKmskw_ZMMu8_ZMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_min_epu8">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI8"/>
+ <parameter type="__m512i" varname="a" etype="UI8"/>
+ <parameter type="__m512i" varname="b" etype="UI8"/>
+ <description>Compare packed unsigned 8-bit integers in "a" and "b", and store packed minimum values in "dst".</description>
+ <operation>
+FOR j := 0 to 63
+ i := j*8
+ dst[i+7:i] := MIN(a[i+7:i], b[i+7:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMINUB" form="zmm, zmm, zmm" xed="VPMINUB_ZMMu8_MASKmskw_ZMMu8_ZMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_min_epu8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__m128i" varname="src" etype="UI8"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
+ <parameter type="__m128i" varname="b" etype="UI8"/>
+ <description>Compare packed unsigned 8-bit integers in "a" and "b", and store packed minimum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := MIN(a[i+7:i], b[i+7:i])
+ ELSE
+ dst[i+7:i] := src[i+7:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMINUB" form="xmm {k}, xmm, xmm" xed="VPMINUB_XMMu8_MASKmskw_XMMu8_XMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_min_epu8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
+ <parameter type="__m128i" varname="b" etype="UI8"/>
+ <description>Compare packed unsigned 8-bit integers in "a" and "b", and store packed minimum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := MIN(a[i+7:i], b[i+7:i])
+ ELSE
+ dst[i+7:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMINUB" form="xmm {z}, xmm, xmm" xed="VPMINUB_XMMu8_MASKmskw_XMMu8_XMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_min_epu16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__m256i" varname="src" etype="UI16"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI16"/>
+ <parameter type="__m256i" varname="b" etype="UI16"/>
+ <description>Compare packed unsigned 16-bit integers in "a" and "b", and store packed minimum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := MIN(a[i+15:i], b[i+15:i])
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMINUW" form="ymm {k}, ymm, ymm" xed="VPMINUW_YMMu16_MASKmskw_YMMu16_YMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_min_epu16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI16"/>
+ <parameter type="__m256i" varname="b" etype="UI16"/>
+ <description>Compare packed unsigned 16-bit integers in "a" and "b", and store packed minimum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := MIN(a[i+15:i], b[i+15:i])
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMINUW" form="ymm {z}, ymm, ymm" xed="VPMINUW_YMMu16_MASKmskw_YMMu16_YMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_min_epu16">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__m512i" varname="src" etype="UI16"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI16"/>
+ <parameter type="__m512i" varname="b" etype="UI16"/>
+ <description>Compare packed unsigned 16-bit integers in "a" and "b", and store packed minimum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := MIN(a[i+15:i], b[i+15:i])
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMINUW" form="zmm {k}, zmm, zmm" xed="VPMINUW_ZMMu16_MASKmskw_ZMMu16_ZMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_min_epu16">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI16"/>
+ <parameter type="__m512i" varname="b" etype="UI16"/>
+ <description>Compare packed unsigned 16-bit integers in "a" and "b", and store packed minimum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := MIN(a[i+15:i], b[i+15:i])
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMINUW" form="zmm {z}, zmm, zmm" xed="VPMINUW_ZMMu16_MASKmskw_ZMMu16_ZMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_min_epu16">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__m512i" varname="a" etype="UI16"/>
+ <parameter type="__m512i" varname="b" etype="UI16"/>
+ <description>Compare packed unsigned 16-bit integers in "a" and "b", and store packed minimum values in "dst".</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ dst[i+15:i] := MIN(a[i+15:i], b[i+15:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMINUW" form="zmm, zmm, zmm" xed="VPMINUW_ZMMu16_MASKmskw_ZMMu16_ZMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_min_epu16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__m128i" varname="src" etype="UI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <parameter type="__m128i" varname="b" etype="UI16"/>
+ <description>Compare packed unsigned 16-bit integers in "a" and "b", and store packed minimum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := MIN(a[i+15:i], b[i+15:i])
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMINUW" form="xmm {k}, xmm, xmm" xed="VPMINUW_XMMu16_MASKmskw_XMMu16_XMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_min_epu16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <parameter type="__m128i" varname="b" etype="UI16"/>
+ <description>Compare packed unsigned 16-bit integers in "a" and "b", and store packed minimum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := MIN(a[i+15:i], b[i+15:i])
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMINUW" form="xmm {z}, xmm, xmm" xed="VPMINUW_XMMu16_MASKmskw_XMMu16_XMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_movepi8_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI8"/>
+ <description>Set each bit of mask register "k" based on the most significant bit of the corresponding packed 8-bit integer in "a".</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*8
+ IF a[i+7]
+ k[j] := 1
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:32] := 0
+ </operation>
+ <instruction name="VPMOVB2M" form="k, ymm" xed="VPMOVB2M_MASKmskw_YMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_movepi8_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__mmask64" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI8"/>
+ <description>Set each bit of mask register "k" based on the most significant bit of the corresponding packed 8-bit integer in "a".</description>
+ <operation>
+FOR j := 0 to 63
+ i := j*8
+ IF a[i+7]
+ k[j] := 1
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:64] := 0
+ </operation>
+ <instruction name="VPMOVB2M" form="k, zmm" xed="VPMOVB2M_MASKmskw_ZMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_movepi8_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
+ <description>Set each bit of mask register "k" based on the most significant bit of the corresponding packed 8-bit integer in "a".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*8
+ IF a[i+7]
+ k[j] := 1
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:16] := 0
+ </operation>
+ <instruction name="VPMOVB2M" form="k, xmm" xed="VPMOVB2M_MASKmskw_XMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_movm_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256i" varname="dst" etype="UI8"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <description>Set each packed 8-bit integer in "dst" to all ones or all zeros based on the value of the corresponding bit in "k".</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := 0xFF
+ ELSE
+ dst[i+7:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMOVM2B" form="ymm" xed="VPMOVM2B_YMMu8_MASKmskw_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_movm_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512i" varname="dst" etype="UI8"/>
+ <parameter type="__mmask64" varname="k" etype="MASK"/>
+ <description>Set each packed 8-bit integer in "dst" to all ones or all zeros based on the value of the corresponding bit in "k".</description>
+ <operation>
+FOR j := 0 to 63
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := 0xFF
+ ELSE
+ dst[i+7:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMOVM2B" form="zmm" xed="VPMOVM2B_ZMMu8_MASKmskw_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_movm_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <description>Set each packed 8-bit integer in "dst" to all ones or all zeros based on the value of the corresponding bit in "k".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := 0xFF
+ ELSE
+ dst[i+7:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMOVM2B" form="xmm" xed="VPMOVM2B_XMMu8_MASKmskw_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_movm_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <description>Set each packed 16-bit integer in "dst" to all ones or all zeros based on the value of the corresponding bit in "k".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := 0xFFFF
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMOVM2W" form="ymm" xed="VPMOVM2W_YMMu16_MASKmskw_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_movm_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <description>Set each packed 16-bit integer in "dst" to all ones or all zeros based on the value of the corresponding bit in "k".</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := 0xFFFF
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMOVM2W" form="zmm" xed="VPMOVM2W_ZMMu16_MASKmskw_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_movm_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <description>Set each packed 16-bit integer in "dst" to all ones or all zeros based on the value of the corresponding bit in "k".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := 0xFFFF
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMOVM2W" form="xmm" xed="VPMOVM2W_XMMu16_MASKmskw_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_cvtsepi16_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="SI8"/>
+ <parameter type="__m256i" varname="a" etype="SI16"/>
+ <description>Convert packed signed 16-bit integers in "a" to packed 8-bit integers with signed saturation, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := 16*j
+ l := 8*j
+ dst[l+7:l] := Saturate8(a[i+15:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMOVSWB" form="xmm, ymm" xed="VPMOVSWB_XMMi8_MASKmskw_YMMi16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cvtsepi16_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="SI8"/>
+ <parameter type="__m128i" varname="src" etype="SI8"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI16"/>
+ <description>Convert packed signed 16-bit integers in "a" to packed 8-bit integers with signed saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := 16*j
+ l := 8*j
+ IF k[j]
+ dst[l+7:l] := Saturate8(a[i+15:i])
+ ELSE
+ dst[l+7:l] := src[l+7:l]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMOVSWB" form="xmm {k}, ymm" xed="VPMOVSWB_XMMi8_MASKmskw_YMMi16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cvtsepi16_storeu_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Convert</category>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="base_addr" etype="SI8" memwidth="128"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI16"/>
+ <description>Convert packed signed 16-bit integers in "a" to packed 8-bit integers with signed saturation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr".</description>
+ <operation>
+FOR j := 0 to 15
+ i := 16*j
+ l := 8*j
+ IF k[j]
+ MEM[base_addr+l+7:base_addr+l] := Saturate8(a[i+15:i])
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VPMOVSWB" form="m128 {k}, ymm" xed="VPMOVSWB_MEMi8_MASKmskw_YMMi16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_cvtsepi16_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="SI8"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI16"/>
+ <description>Convert packed signed 16-bit integers in "a" to packed 8-bit integers with signed saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := 16*j
+ l := 8*j
+ IF k[j]
+ dst[l+7:l] := Saturate8(a[i+15:i])
+ ELSE
+ dst[l+7:l] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMOVSWB" form="xmm {z}, ymm" xed="VPMOVSWB_XMMi8_MASKmskw_YMMi16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_cvtsepi16_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Convert</category>
+ <return type="__m256i" varname="dst" etype="SI8"/>
+ <parameter type="__m512i" varname="a" etype="SI16"/>
+ <description>Convert packed signed 16-bit integers in "a" to packed 8-bit integers with signed saturation, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 31
+ i := 16*j
+ l := 8*j
+ dst[l+7:l] := Saturate8(a[i+15:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMOVSWB" form="ymm, zmm" xed="VPMOVSWB_YMMi8_MASKmskw_ZMMi16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cvtsepi16_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Convert</category>
+ <return type="__m256i" varname="dst" etype="SI8"/>
+ <parameter type="__m256i" varname="src" etype="SI8"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI16"/>
+ <description>Convert packed signed 16-bit integers in "a" to packed 8-bit integers with signed saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := 16*j
+ l := 8*j
+ IF k[j]
+ dst[l+7:l] := Saturate8(a[i+15:i])
+ ELSE
+ dst[l+7:l] := src[l+7:l]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMOVSWB" form="ymm {k}, zmm" xed="VPMOVSWB_YMMi8_MASKmskw_ZMMi16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cvtsepi16_storeu_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Convert</category>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="base_addr" etype="SI8" memwidth="256"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI16"/>
+ <description>Convert packed signed 16-bit integers in "a" to packed 8-bit integers with signed saturation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr".</description>
+ <operation>
+FOR j := 0 to 31
+ i := 16*j
+ l := 8*j
+ IF k[j]
+ MEM[base_addr+l+7:base_addr+l] := Saturate8(a[i+15:i])
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VPMOVSWB" form="m256 {k}, zmm" xed="VPMOVSWB_MEMi8_MASKmskw_ZMMi16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_cvtsepi16_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Convert</category>
+ <return type="__m256i" varname="dst" etype="SI8"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI16"/>
+ <description>Convert packed signed 16-bit integers in "a" to packed 8-bit integers with signed saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := 16*j
+ l := 8*j
+ IF k[j]
+ dst[l+7:l] := Saturate8(a[i+15:i])
+ ELSE
+ dst[l+7:l] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMOVSWB" form="ymm {z}, zmm" xed="VPMOVSWB_YMMi8_MASKmskw_ZMMi16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cvtsepi16_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="SI8"/>
+ <parameter type="__m128i" varname="a" etype="SI16"/>
+ <description>Convert packed signed 16-bit integers in "a" to packed 8-bit integers with signed saturation, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := 16*j
+ l := 8*j
+ dst[l+7:l] := Saturate8(a[i+15:i])
+ENDFOR
+dst[MAX:64] := 0
+ </operation>
+ <instruction name="VPMOVSWB" form="xmm, xmm" xed="VPMOVSWB_XMMi8_MASKmskw_XMMi16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cvtsepi16_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="SI8"/>
+ <parameter type="__m128i" varname="src" etype="SI8"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI16"/>
+ <description>Convert packed signed 16-bit integers in "a" to packed 8-bit integers with signed saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := 16*j
+ l := 8*j
+ IF k[j]
+ dst[l+7:l] := Saturate8(a[i+15:i])
+ ELSE
+ dst[l+7:l] := src[l+7:l]
+ FI
+ENDFOR
+dst[MAX:64] := 0
+ </operation>
+ <instruction name="VPMOVSWB" form="xmm {k}, xmm" xed="VPMOVSWB_XMMi8_MASKmskw_XMMi16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cvtsepi16_storeu_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Convert</category>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="base_addr" etype="SI8" memwidth="64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI16"/>
+ <description>Convert packed signed 16-bit integers in "a" to packed 8-bit integers with signed saturation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr".</description>
+ <operation>
+FOR j := 0 to 7
+ i := 16*j
+ l := 8*j
+ IF k[j]
+ MEM[base_addr+l+7:base_addr+l] := Saturate8(a[i+15:i])
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VPMOVSWB" form="m64 {k}, xmm" xed="VPMOVSWB_MEMi8_MASKmskw_XMMi16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_cvtsepi16_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="SI8"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI16"/>
+ <description>Convert packed signed 16-bit integers in "a" to packed 8-bit integers with signed saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := 16*j
+ l := 8*j
+ IF k[j]
+ dst[l+7:l] := Saturate8(a[i+15:i])
+ ELSE
+ dst[l+7:l] := 0
+ FI
+ENDFOR
+dst[MAX:64] := 0
+ </operation>
+ <instruction name="VPMOVSWB" form="xmm {z}, xmm" xed="VPMOVSWB_XMMi8_MASKmskw_XMMi16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cvtepi8_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Convert</category>
+ <return type="__m256i" varname="dst" etype="SI16"/>
+ <parameter type="__m256i" varname="src" etype="SI16"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI8"/>
+ <description>Sign extend packed 8-bit integers in "a" to packed 16-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*8
+ l := j*16
+ IF k[j]
+ dst[l+15:l] := SignExtend16(a[i+7:i])
+ ELSE
+ dst[l+15:l] := src[l+15:l]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMOVSXBW" form="ymm {k}, xmm" xed="VPMOVSXBW_YMMi16_MASKmskw_XMMi8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_cvtepi8_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Convert</category>
+ <return type="__m256i" varname="dst" etype="SI16"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI8"/>
+ <description>Sign extend packed 8-bit integers in "a" to packed 16-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*8
+ l := j*16
+ IF k[j]
+ dst[l+15:l] := SignExtend16(a[i+7:i])
+ ELSE
+ dst[l+15:l] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMOVSXBW" form="ymm {z}, xmm" xed="VPMOVSXBW_YMMi16_MASKmskw_XMMi8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_cvtepi8_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Convert</category>
+ <return type="__m512i" varname="dst" etype="SI16"/>
+ <parameter type="__m256i" varname="a" etype="SI8"/>
+ <description>Sign extend packed 8-bit integers in "a" to packed 16-bit integers, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*8
+ l := j*16
+ dst[l+15:l] := SignExtend16(a[i+7:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMOVSXBW" form="zmm, ymm" xed="VPMOVSXBW_ZMMi16_MASKmskw_YMMi8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cvtepi8_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Convert</category>
+ <return type="__m512i" varname="dst" etype="SI16"/>
+ <parameter type="__m512i" varname="src" etype="SI16"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI8"/>
+ <description>Sign extend packed 8-bit integers in "a" to packed 16-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*8
+ l := j*16
+ IF k[j]
+ dst[l+15:l] := SignExtend16(a[i+7:i])
+ ELSE
+ dst[l+15:l] := src[l+15:l]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMOVSXBW" form="zmm {k}, ymm" xed="VPMOVSXBW_ZMMi16_MASKmskw_YMMi8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_cvtepi8_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Convert</category>
+ <return type="__m512i" varname="dst" etype="SI16"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI8"/>
+ <description>Sign extend packed 8-bit integers in "a" to packed 16-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*8
+ l := j*16
+ IF k[j]
+ dst[l+15:l] := SignExtend16(a[i+7:i])
+ ELSE
+ dst[l+15:l] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMOVSXBW" form="zmm {z}, ymm" xed="VPMOVSXBW_ZMMi16_MASKmskw_YMMi8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cvtepi8_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="SI16"/>
+ <parameter type="__m128i" varname="src" etype="SI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI8"/>
+ <description>Sign extend packed 8-bit integers in "a" to packed 16-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*8
+ l := j*16
+ IF k[j]
+ dst[l+15:l] := SignExtend16(a[i+7:i])
+ ELSE
+ dst[l+15:l] := src[l+15:l]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMOVSXBW" form="xmm {k}, xmm" xed="VPMOVSXBW_XMMi16_MASKmskw_XMMi8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_cvtepi8_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="SI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI8"/>
+ <description>Sign extend packed 8-bit integers in "a" to packed 16-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*8
+ l := j*16
+ IF k[j]
+ dst[l+15:l] := SignExtend16(a[i+7:i])
+ ELSE
+ dst[l+15:l] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMOVSXBW" form="xmm {z}, xmm" xed="VPMOVSXBW_XMMi16_MASKmskw_XMMi8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_cvtusepi16_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__m256i" varname="a" etype="UI16"/>
+ <description>Convert packed unsigned 16-bit integers in "a" to packed unsigned 8-bit integers with unsigned saturation, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := 16*j
+ l := 8*j
+ dst[l+7:l] := SaturateU8(a[i+15:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMOVUSWB" form="xmm, ymm" xed="VPMOVUSWB_XMMu8_MASKmskw_YMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cvtusepi16_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__m128i" varname="src" etype="UI8"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI16"/>
+ <description>Convert packed unsigned 16-bit integers in "a" to packed unsigned 8-bit integers with unsigned saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := 16*j
+ l := 8*j
+ IF k[j]
+ dst[l+7:l] := SaturateU8(a[i+15:i])
+ ELSE
+ dst[l+7:l] := src[l+7:l]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMOVUSWB" form="xmm {k}, ymm" xed="VPMOVUSWB_XMMu8_MASKmskw_YMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cvtusepi16_storeu_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Convert</category>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="base_addr" etype="UI8" memwidth="128"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI16"/>
+ <description>Convert packed unsigned 16-bit integers in "a" to packed unsigned 8-bit integers with unsigned saturation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr".</description>
+ <operation>
+FOR j := 0 to 15
+ i := 16*j
+ l := 8*j
+ IF k[j]
+ MEM[base_addr+l+7:base_addr+l] := SaturateU8(a[i+15:i])
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VPMOVUSWB" form="m128 {k}, ymm" xed="VPMOVUSWB_MEMu8_MASKmskw_YMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_cvtusepi16_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI16"/>
+ <description>Convert packed unsigned 16-bit integers in "a" to packed unsigned 8-bit integers with unsigned saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := 16*j
+ l := 8*j
+ IF k[j]
+ dst[l+7:l] := SaturateU8(a[i+15:i])
+ ELSE
+ dst[l+7:l] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMOVUSWB" form="xmm {z}, ymm" xed="VPMOVUSWB_XMMu8_MASKmskw_YMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_cvtusepi16_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Convert</category>
+ <return type="__m256i" varname="dst" etype="UI8"/>
+ <parameter type="__m512i" varname="a" etype="UI16"/>
+ <description>Convert packed unsigned 16-bit integers in "a" to packed unsigned 8-bit integers with unsigned saturation, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 31
+ i := 16*j
+ l := 8*j
+ dst[l+7:l] := SaturateU8(a[i+15:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMOVUSWB" form="ymm, zmm" xed="VPMOVUSWB_YMMu8_MASKmskw_ZMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cvtusepi16_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Convert</category>
+ <return type="__m256i" varname="dst" etype="UI8"/>
+ <parameter type="__m256i" varname="src" etype="UI8"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI16"/>
+ <description>Convert packed unsigned 16-bit integers in "a" to packed unsigned 8-bit integers with unsigned saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := 16*j
+ l := 8*j
+ IF k[j]
+ dst[l+7:l] := SaturateU8(a[i+15:i])
+ ELSE
+ dst[l+7:l] := src[l+7:l]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMOVUSWB" form="ymm {k}, zmm" xed="VPMOVUSWB_YMMu8_MASKmskw_ZMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cvtusepi16_storeu_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Convert</category>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="base_addr" etype="UI8" memwidth="256"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI16"/>
+ <description>Convert packed unsigned 16-bit integers in "a" to packed unsigned 8-bit integers with unsigned saturation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr".</description>
+ <operation>
+FOR j := 0 to 31
+ i := 16*j
+ l := 8*j
+ IF k[j]
+ MEM[base_addr+l+7:base_addr+l] := SaturateU8(a[i+15:i])
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VPMOVUSWB" form="m256 {k}, zmm" xed="VPMOVUSWB_MEMu8_MASKmskw_ZMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_cvtusepi16_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Convert</category>
+ <return type="__m256i" varname="dst" etype="UI8"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI16"/>
+ <description>Convert packed unsigned 16-bit integers in "a" to packed unsigned 8-bit integers with unsigned saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := 16*j
+ l := 8*j
+ IF k[j]
+ dst[l+7:l] := SaturateU8(a[i+15:i])
+ ELSE
+ dst[l+7:l] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMOVUSWB" form="ymm {z}, zmm" xed="VPMOVUSWB_YMMu8_MASKmskw_ZMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cvtusepi16_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <description>Convert packed unsigned 16-bit integers in "a" to packed unsigned 8-bit integers with unsigned saturation, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := 16*j
+ l := 8*j
+ dst[l+7:l] := SaturateU8(a[i+15:i])
+ENDFOR
+dst[MAX:64] := 0
+ </operation>
+ <instruction name="VPMOVUSWB" form="xmm, xmm" xed="VPMOVUSWB_XMMu8_MASKmskw_XMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cvtusepi16_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__m128i" varname="src" etype="UI8"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <description>Convert packed unsigned 16-bit integers in "a" to packed unsigned 8-bit integers with unsigned saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := 16*j
+ l := 8*j
+ IF k[j]
+ dst[l+7:l] := SaturateU8(a[i+15:i])
+ ELSE
+ dst[l+7:l] := src[l+7:l]
+ FI
+ENDFOR
+dst[MAX:64] := 0
+ </operation>
+ <instruction name="VPMOVUSWB" form="xmm {k}, xmm" xed="VPMOVUSWB_XMMu8_MASKmskw_XMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cvtusepi16_storeu_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Convert</category>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="base_addr" etype="UI8" memwidth="64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <description>Convert packed unsigned 16-bit integers in "a" to packed unsigned 8-bit integers with unsigned saturation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr".</description>
+ <operation>
+FOR j := 0 to 7
+ i := 16*j
+ l := 8*j
+ IF k[j]
+ MEM[base_addr+l+7:base_addr+l] := SaturateU8(a[i+15:i])
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VPMOVUSWB" form="m64 {k}, xmm" xed="VPMOVUSWB_MEMu8_MASKmskw_XMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_cvtusepi16_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <description>Convert packed unsigned 16-bit integers in "a" to packed unsigned 8-bit integers with unsigned saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := 16*j
+ l := 8*j
+ IF k[j]
+ dst[l+7:l] := SaturateU8(a[i+15:i])
+ ELSE
+ dst[l+7:l] := 0
+ FI
+ENDFOR
+dst[MAX:64] := 0
+ </operation>
+ <instruction name="VPMOVUSWB" form="xmm {z}, xmm" xed="VPMOVUSWB_XMMu8_MASKmskw_XMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_movepi16_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI16"/>
+ <description>Set each bit of mask register "k" based on the most significant bit of the corresponding packed 16-bit integer in "a".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ IF a[i+15]
+ k[j] := 1
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:16] := 0
+ </operation>
+ <instruction name="VPMOVW2M" form="k, ymm" xed="VPMOVW2M_MASKmskw_YMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_movepi16_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI16"/>
+ <description>Set each bit of mask register "k" based on the most significant bit of the corresponding packed 16-bit integer in "a".</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ IF a[i+15]
+ k[j] := 1
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:32] := 0
+ </operation>
+ <instruction name="VPMOVW2M" form="k, zmm" xed="VPMOVW2M_MASKmskw_ZMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_movepi16_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <description>Set each bit of mask register "k" based on the most significant bit of the corresponding packed 16-bit integer in "a".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ IF a[i+15]
+ k[j] := 1
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:8] := 0
+ </operation>
+ <instruction name="VPMOVW2M" form="k, xmm" xed="VPMOVW2M_MASKmskw_XMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_cvtepi16_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__m256i" varname="a" etype="UI16"/>
+ <description>Convert packed 16-bit integers in "a" to packed 8-bit integers with truncation, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := 16*j
+ l := 8*j
+ dst[l+7:l] := Truncate8(a[i+15:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMOVWB" form="xmm, ymm" xed="VPMOVWB_XMMu8_MASKmskw_YMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cvtepi16_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__m128i" varname="src" etype="UI8"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI16"/>
+ <description>Convert packed 16-bit integers in "a" to packed 8-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := 16*j
+ l := 8*j
+ IF k[j]
+ dst[l+7:l] := Truncate8(a[i+15:i])
+ ELSE
+ dst[l+7:l] := src[l+7:l]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMOVWB" form="xmm {k}, ymm" xed="VPMOVWB_XMMu8_MASKmskw_YMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cvtepi16_storeu_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Convert</category>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="base_addr" etype="UI8" memwidth="128"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI16"/>
+ <description>Convert packed 16-bit integers in "a" to packed 8-bit integers with truncation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr".</description>
+ <operation>
+FOR j := 0 to 15
+ i := 16*j
+ l := 8*j
+ IF k[j]
+ MEM[base_addr+l+7:base_addr+l] := Truncate8(a[i+15:i])
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VPMOVWB" form="m128 {k}, ymm" xed="VPMOVWB_MEMu8_MASKmskw_YMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_cvtepi16_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI16"/>
+ <description>Convert packed 16-bit integers in "a" to packed 8-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := 16*j
+ l := 8*j
+ IF k[j]
+ dst[l+7:l] := Truncate8(a[i+15:i])
+ ELSE
+ dst[l+7:l] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMOVWB" form="xmm {z}, ymm" xed="VPMOVWB_XMMu8_MASKmskw_YMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_cvtepi16_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Convert</category>
+ <return type="__m256i" varname="dst" etype="UI8"/>
+ <parameter type="__m512i" varname="a" etype="UI16"/>
+ <description>Convert packed 16-bit integers in "a" to packed 8-bit integers with truncation, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 31
+ i := 16*j
+ l := 8*j
+ dst[l+7:l] := Truncate8(a[i+15:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMOVWB" form="ymm, zmm" xed="VPMOVWB_YMMu8_MASKmskw_ZMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cvtepi16_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Convert</category>
+ <return type="__m256i" varname="dst" etype="UI8"/>
+ <parameter type="__m256i" varname="src" etype="UI8"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI16"/>
+ <description>Convert packed 16-bit integers in "a" to packed 8-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := 16*j
+ l := 8*j
+ IF k[j]
+ dst[l+7:l] := Truncate8(a[i+15:i])
+ ELSE
+ dst[l+7:l] := src[l+7:l]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMOVWB" form="ymm {k}, zmm" xed="VPMOVWB_YMMu8_MASKmskw_ZMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cvtepi16_storeu_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Convert</category>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="base_addr" etype="UI8" memwidth="256"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI16"/>
+ <description>Convert packed 16-bit integers in "a" to packed 8-bit integers with truncation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr".</description>
+ <operation>
+FOR j := 0 to 31
+ i := 16*j
+ l := 8*j
+ IF k[j]
+ MEM[base_addr+l+7:base_addr+l] := Truncate8(a[i+15:i])
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VPMOVWB" form="m256 {k}, zmm" xed="VPMOVWB_MEMu8_MASKmskw_ZMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_cvtepi16_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Convert</category>
+ <return type="__m256i" varname="dst" etype="UI8"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI16"/>
+ <description>Convert packed 16-bit integers in "a" to packed 8-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := 16*j
+ l := 8*j
+ IF k[j]
+ dst[l+7:l] := Truncate8(a[i+15:i])
+ ELSE
+ dst[l+7:l] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMOVWB" form="ymm {z}, zmm" xed="VPMOVWB_YMMu8_MASKmskw_ZMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cvtepi16_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <description>Convert packed 16-bit integers in "a" to packed 8-bit integers with truncation, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := 16*j
+ l := 8*j
+ dst[l+7:l] := Truncate8(a[i+15:i])
+ENDFOR
+dst[MAX:64] := 0
+ </operation>
+ <instruction name="VPMOVWB" form="xmm, xmm" xed="VPMOVWB_XMMu8_MASKmskw_XMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cvtepi16_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__m128i" varname="src" etype="UI8"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <description>Convert packed 16-bit integers in "a" to packed 8-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := 16*j
+ l := 8*j
+ IF k[j]
+ dst[l+7:l] := Truncate8(a[i+15:i])
+ ELSE
+ dst[l+7:l] := src[l+7:l]
+ FI
+ENDFOR
+dst[MAX:64] := 0
+ </operation>
+ <instruction name="VPMOVWB" form="xmm {k}, xmm" xed="VPMOVWB_XMMu8_MASKmskw_XMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cvtepi16_storeu_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Convert</category>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="base_addr" etype="UI8" memwidth="64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <description>Convert packed 16-bit integers in "a" to packed 8-bit integers with truncation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr".</description>
+ <operation>
+FOR j := 0 to 7
+ i := 16*j
+ l := 8*j
+ IF k[j]
+ MEM[base_addr+l+7:base_addr+l] := Truncate8(a[i+15:i])
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VPMOVWB" form="m64 {k}, xmm" xed="VPMOVWB_MEMu8_MASKmskw_XMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_cvtepi16_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <description>Convert packed 16-bit integers in "a" to packed 8-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := 16*j
+ l := 8*j
+ IF k[j]
+ dst[l+7:l] := Truncate8(a[i+15:i])
+ ELSE
+ dst[l+7:l] := 0
+ FI
+ENDFOR
+dst[MAX:64] := 0
+ </operation>
+ <instruction name="VPMOVWB" form="xmm {z}, xmm" xed="VPMOVWB_XMMu8_MASKmskw_XMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cvtepu8_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Convert</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__m256i" varname="src" etype="UI16"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
+ <description>Zero extend packed unsigned 8-bit integers in "a" to packed 16-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*8
+ l := j*16
+ IF k[j]
+ dst[l+15:l] := ZeroExtend16(a[i+7:i])
+ ELSE
+ dst[l+15:l] := src[l+15:l]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMOVZXBW" form="ymm {k}, xmm" xed="VPMOVZXBW_YMMi16_MASKmskw_XMMi8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_cvtepu8_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Convert</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
+ <description>Zero extend packed unsigned 8-bit integers in "a" to packed 16-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*8
+ l := j*16
+ IF k[j]
+ dst[l+15:l] := ZeroExtend16(a[i+7:i])
+ ELSE
+ dst[l+15:l] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMOVZXBW" form="ymm {z}, xmm" xed="VPMOVZXBW_YMMi16_MASKmskw_XMMi8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_cvtepu8_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Convert</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__m256i" varname="a" etype="UI8"/>
+ <description>Zero extend packed unsigned 8-bit integers in "a" to packed 16-bit integers, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*8
+ l := j*16
+ dst[l+15:l] := ZeroExtend16(a[i+7:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMOVZXBW" form="zmm, ymm" xed="VPMOVZXBW_ZMMi16_MASKmskw_YMMi8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cvtepu8_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Convert</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__m512i" varname="src" etype="UI16"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI8"/>
+ <description>Zero extend packed unsigned 8-bit integers in "a" to packed 16-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*8
+ l := j*16
+ IF k[j]
+ dst[l+15:l] := ZeroExtend16(a[i+7:i])
+ ELSE
+ dst[l+15:l] := src[l+15:l]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMOVZXBW" form="zmm {k}, ymm" xed="VPMOVZXBW_ZMMi16_MASKmskw_YMMi8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_cvtepu8_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Convert</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI8"/>
+ <description>Zero extend packed unsigned 8-bit integers in "a" to packed 16-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*8
+ l := j*16
+ IF k[j]
+ dst[l+15:l] := ZeroExtend16(a[i+7:i])
+ ELSE
+ dst[l+15:l] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMOVZXBW" form="zmm {z}, ymm" xed="VPMOVZXBW_ZMMi16_MASKmskw_YMMi8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cvtepu8_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__m128i" varname="src" etype="UI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
+ <description>Zero extend packed unsigned 8-bit integers in "a" to packed 16-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*8
+ l := j*16
+ IF k[j]
+ dst[l+15:l] := ZeroExtend16(a[i+7:i])
+ ELSE
+ dst[l+15:l] := src[l+15:l]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMOVZXBW" form="xmm {k}, xmm" xed="VPMOVZXBW_XMMi16_MASKmskw_XMMi8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_cvtepu8_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
+ <description>Zero extend packed unsigned 8-bit integers in "a" to packed 16-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*8
+ l := j*16
+ IF k[j]
+ dst[l+15:l] := ZeroExtend16(a[i+7:i])
+ ELSE
+ dst[l+15:l] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMOVZXBW" form="xmm {z}, xmm" xed="VPMOVZXBW_XMMi16_MASKmskw_XMMi8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_mulhrs_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__m256i" varname="src" etype="UI16"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI16"/>
+ <parameter type="__m256i" varname="b" etype="SI16"/>
+ <description>Multiply packed signed 16-bit integers in "a" and "b", producing intermediate signed 32-bit integers. Truncate each intermediate integer to the 18 most significant bits, round by adding 1, and store bits [16:1] to "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ IF k[j]
+ tmp[31:0] := ((SignExtend32(a[i+15:i]) * SignExtend32(b[i+15:i])) &gt;&gt; 14) + 1
+ dst[i+15:i] := tmp[16:1]
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMULHRSW" form="ymm {k}, ymm, ymm" xed="VPMULHRSW_YMMi16_MASKmskw_YMMi16_YMMi16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_mulhrs_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI16"/>
+ <parameter type="__m256i" varname="b" etype="SI16"/>
+ <description>Multiply packed signed 16-bit integers in "a" and "b", producing intermediate signed 32-bit integers. Truncate each intermediate integer to the 18 most significant bits, round by adding 1, and store bits [16:1] to "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ IF k[j]
+ tmp[31:0] := ((SignExtend32(a[i+15:i]) * SignExtend32(b[i+15:i])) &gt;&gt; 14) + 1
+ dst[i+15:i] := tmp[16:1]
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMULHRSW" form="ymm {z}, ymm, ymm" xed="VPMULHRSW_YMMi16_MASKmskw_YMMi16_YMMi16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_mulhrs_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__m512i" varname="src" etype="UI16"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI16"/>
+ <parameter type="__m512i" varname="b" etype="SI16"/>
+ <description>Multiply packed signed 16-bit integers in "a" and "b", producing intermediate signed 32-bit integers. Truncate each intermediate integer to the 18 most significant bits, round by adding 1, and store bits [16:1] to "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ IF k[j]
+ tmp[31:0] := ((SignExtend32(a[i+15:i]) * SignExtend32(b[i+15:i])) &gt;&gt; 14) + 1
+ dst[i+15:i] := tmp[16:1]
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMULHRSW" form="zmm {k}, zmm, zmm" xed="VPMULHRSW_ZMMi16_MASKmskw_ZMMi16_ZMMi16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_mulhrs_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI16"/>
+ <parameter type="__m512i" varname="b" etype="SI16"/>
+ <description>Multiply packed signed 16-bit integers in "a" and "b", producing intermediate signed 32-bit integers. Truncate each intermediate integer to the 18 most significant bits, round by adding 1, and store bits [16:1] to "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ IF k[j]
+ tmp[31:0] := ((SignExtend32(a[i+15:i]) * SignExtend32(b[i+15:i])) &gt;&gt; 14) + 1
+ dst[i+15:i] := tmp[16:1]
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMULHRSW" form="zmm {z}, zmm, zmm" xed="VPMULHRSW_ZMMi16_MASKmskw_ZMMi16_ZMMi16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mulhrs_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__m512i" varname="a" etype="SI16"/>
+ <parameter type="__m512i" varname="b" etype="SI16"/>
+ <description>Multiply packed signed 16-bit integers in "a" and "b", producing intermediate signed 32-bit integers. Truncate each intermediate integer to the 18 most significant bits, round by adding 1, and store bits [16:1] to "dst".</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ tmp[31:0] := ((SignExtend32(a[i+15:i]) * SignExtend32(b[i+15:i])) &gt;&gt; 14) + 1
+ dst[i+15:i] := tmp[16:1]
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMULHRSW" form="zmm, zmm, zmm" xed="VPMULHRSW_ZMMi16_MASKmskw_ZMMi16_ZMMi16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_mulhrs_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__m128i" varname="src" etype="UI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI16"/>
+ <parameter type="__m128i" varname="b" etype="SI16"/>
+ <description>Multiply packed signed 16-bit integers in "a" and "b", producing intermediate signed 32-bit integers. Truncate each intermediate integer to the 18 most significant bits, round by adding 1, and store bits [16:1] to "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ IF k[j]
+ tmp[31:0] := ((SignExtend32(a[i+15:i]) * SignExtend32(b[i+15:i])) &gt;&gt; 14) + 1
+ dst[i+15:i] := tmp[16:1]
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMULHRSW" form="xmm {k}, xmm, xmm" xed="VPMULHRSW_XMMi16_MASKmskw_XMMi16_XMMi16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_mulhrs_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI16"/>
+ <parameter type="__m128i" varname="b" etype="SI16"/>
+ <description>Multiply packed signed 16-bit integers in "a" and "b", producing intermediate signed 32-bit integers. Truncate each intermediate integer to the 18 most significant bits, round by adding 1, and store bits [16:1] to "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ IF k[j]
+ tmp[31:0] := ((SignExtend32(a[i+15:i]) * SignExtend32(b[i+15:i])) &gt;&gt; 14) + 1
+ dst[i+15:i] := tmp[16:1]
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMULHRSW" form="xmm {z}, xmm, xmm" xed="VPMULHRSW_XMMi16_MASKmskw_XMMi16_XMMi16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_mulhi_epu16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__m256i" varname="src" etype="UI16"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI16"/>
+ <parameter type="__m256i" varname="b" etype="UI16"/>
+ <description>Multiply the packed unsigned 16-bit integers in "a" and "b", producing intermediate 32-bit integers, and store the high 16 bits of the intermediate integers in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ IF k[j]
+ tmp[31:0] := a[i+15:i] * b[i+15:i]
+ dst[i+15:i] := tmp[31:16]
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMULHUW" form="ymm {k}, ymm, ymm" xed="VPMULHUW_YMMu16_MASKmskw_YMMu16_YMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_mulhi_epu16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI16"/>
+ <parameter type="__m256i" varname="b" etype="UI16"/>
+ <description>Multiply the packed unsigned 16-bit integers in "a" and "b", producing intermediate 32-bit integers, and store the high 16 bits of the intermediate integers in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ IF k[j]
+ tmp[31:0] := a[i+15:i] * b[i+15:i]
+ dst[i+15:i] := tmp[31:16]
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMULHUW" form="ymm {z}, ymm, ymm" xed="VPMULHUW_YMMu16_MASKmskw_YMMu16_YMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_mulhi_epu16">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__m512i" varname="src" etype="UI16"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI16"/>
+ <parameter type="__m512i" varname="b" etype="UI16"/>
+ <description>Multiply the packed unsigned 16-bit integers in "a" and "b", producing intermediate 32-bit integers, and store the high 16 bits of the intermediate integers in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ IF k[j]
+ tmp[31:0] := a[i+15:i] * b[i+15:i]
+ dst[i+15:i] := tmp[31:16]
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMULHUW" form="zmm {k}, zmm, zmm" xed="VPMULHUW_ZMMu16_MASKmskw_ZMMu16_ZMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_mulhi_epu16">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI16"/>
+ <parameter type="__m512i" varname="b" etype="UI16"/>
+ <description>Multiply the packed unsigned 16-bit integers in "a" and "b", producing intermediate 32-bit integers, and store the high 16 bits of the intermediate integers in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ IF k[j]
+ tmp[31:0] := a[i+15:i] * b[i+15:i]
+ dst[i+15:i] := tmp[31:16]
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMULHUW" form="zmm {z}, zmm, zmm" xed="VPMULHUW_ZMMu16_MASKmskw_ZMMu16_ZMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mulhi_epu16">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__m512i" varname="a" etype="UI16"/>
+ <parameter type="__m512i" varname="b" etype="UI16"/>
+ <description>Multiply the packed unsigned 16-bit integers in "a" and "b", producing intermediate 32-bit integers, and store the high 16 bits of the intermediate integers in "dst".</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ tmp[31:0] := a[i+15:i] * b[i+15:i]
+ dst[i+15:i] := tmp[31:16]
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMULHUW" form="zmm, zmm, zmm" xed="VPMULHUW_ZMMu16_MASKmskw_ZMMu16_ZMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_mulhi_epu16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__m128i" varname="src" etype="UI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <parameter type="__m128i" varname="b" etype="UI16"/>
+ <description>Multiply the packed unsigned 16-bit integers in "a" and "b", producing intermediate 32-bit integers, and store the high 16 bits of the intermediate integers in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ IF k[j]
+ tmp[31:0] := a[i+15:i] * b[i+15:i]
+ dst[i+15:i] := tmp[31:16]
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMULHUW" form="xmm {k}, xmm, xmm" xed="VPMULHUW_XMMu16_MASKmskw_XMMu16_XMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_mulhi_epu16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <parameter type="__m128i" varname="b" etype="UI16"/>
+ <description>Multiply the packed unsigned 16-bit integers in "a" and "b", producing intermediate 32-bit integers, and store the high 16 bits of the intermediate integers in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ IF k[j]
+ tmp[31:0] := a[i+15:i] * b[i+15:i]
+ dst[i+15:i] := tmp[31:16]
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMULHUW" form="xmm {z}, xmm, xmm" xed="VPMULHUW_XMMu16_MASKmskw_XMMu16_XMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_mulhi_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__m256i" varname="src" etype="UI16"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI16"/>
+ <parameter type="__m256i" varname="b" etype="SI16"/>
+ <description>Multiply the packed signed 16-bit integers in "a" and "b", producing intermediate 32-bit integers, and store the high 16 bits of the intermediate integers in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ IF k[j]
+ tmp[31:0] := SignExtend32(a[i+15:i]) * SignExtend32(b[i+15:i])
+ dst[i+15:i] := tmp[31:16]
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMULHW" form="ymm {k}, ymm, ymm" xed="VPMULHW_YMMu16_MASKmskw_YMMu16_YMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_mulhi_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI16"/>
+ <parameter type="__m256i" varname="b" etype="SI16"/>
+ <description>Multiply the packed signed 16-bit integers in "a" and "b", producing intermediate 32-bit integers, and store the high 16 bits of the intermediate integers in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ IF k[j]
+ tmp[31:0] := SignExtend32(a[i+15:i]) * SignExtend32(b[i+15:i])
+ dst[i+15:i] := tmp[31:16]
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMULHW" form="ymm {z}, ymm, ymm" xed="VPMULHW_YMMu16_MASKmskw_YMMu16_YMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_mulhi_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__m512i" varname="src" etype="UI16"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI16"/>
+ <parameter type="__m512i" varname="b" etype="SI16"/>
+ <description>Multiply the packed signed 16-bit integers in "a" and "b", producing intermediate 32-bit integers, and store the high 16 bits of the intermediate integers in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ IF k[j]
+ tmp[31:0] := SignExtend32(a[i+15:i]) * SignExtend32(b[i+15:i])
+ dst[i+15:i] := tmp[31:16]
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMULHW" form="zmm {k}, zmm, zmm" xed="VPMULHW_ZMMu16_MASKmskw_ZMMu16_ZMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_mulhi_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI16"/>
+ <parameter type="__m512i" varname="b" etype="SI16"/>
+ <description>Multiply the packed signed 16-bit integers in "a" and "b", producing intermediate 32-bit integers, and store the high 16 bits of the intermediate integers in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ IF k[j]
+ tmp[31:0] := SignExtend32(a[i+15:i]) * SignExtend32(b[i+15:i])
+ dst[i+15:i] := tmp[31:16]
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMULHW" form="zmm {z}, zmm, zmm" xed="VPMULHW_ZMMu16_MASKmskw_ZMMu16_ZMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mulhi_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__m512i" varname="a" etype="SI16"/>
+ <parameter type="__m512i" varname="b" etype="SI16"/>
+ <description>Multiply the packed signed 16-bit integers in "a" and "b", producing intermediate 32-bit integers, and store the high 16 bits of the intermediate integers in "dst".</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ tmp[31:0] := SignExtend32(a[i+15:i]) * SignExtend32(b[i+15:i])
+ dst[i+15:i] := tmp[31:16]
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMULHW" form="zmm, zmm, zmm" xed="VPMULHW_ZMMu16_MASKmskw_ZMMu16_ZMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_mulhi_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__m128i" varname="src" etype="UI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI16"/>
+ <parameter type="__m128i" varname="b" etype="SI16"/>
+ <description>Multiply the packed signed 16-bit integers in "a" and "b", producing intermediate 32-bit integers, and store the high 16 bits of the intermediate integers in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ IF k[j]
+ tmp[31:0] := SignExtend32(a[i+15:i]) * SignExtend32(b[i+15:i])
+ dst[i+15:i] := tmp[31:16]
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMULHW" form="xmm {k}, xmm, xmm" xed="VPMULHW_XMMu16_MASKmskw_XMMu16_XMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_mulhi_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI16"/>
+ <parameter type="__m128i" varname="b" etype="SI16"/>
+ <description>Multiply the packed signed 16-bit integers in "a" and "b", producing intermediate 32-bit integers, and store the high 16 bits of the intermediate integers in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ IF k[j]
+ tmp[31:0] := SignExtend32(a[i+15:i]) * SignExtend32(b[i+15:i])
+ dst[i+15:i] := tmp[31:16]
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMULHW" form="xmm {z}, xmm, xmm" xed="VPMULHW_XMMu16_MASKmskw_XMMu16_XMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_mullo_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__m256i" varname="src" etype="UI16"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI16"/>
+ <parameter type="__m256i" varname="b" etype="UI16"/>
+ <description>Multiply the packed 16-bit integers in "a" and "b", producing intermediate 32-bit integers, and store the low 16 bits of the intermediate integers in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ IF k[j]
+ tmp[31:0] := SignExtend32(a[i+15:i]) * SignExtend32(b[i+15:i])
+ dst[i+15:i] := tmp[15:0]
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMULLW" form="ymm {k}, ymm, ymm" xed="VPMULLW_YMMu16_MASKmskw_YMMu16_YMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_mullo_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI16"/>
+ <parameter type="__m256i" varname="b" etype="UI16"/>
+ <description>Multiply the packed 16-bit integers in "a" and "b", producing intermediate 32-bit integers, and store the low 16 bits of the intermediate integers in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ IF k[j]
+ tmp[31:0] := SignExtend32(a[i+15:i]) * SignExtend32(b[i+15:i])
+ dst[i+15:i] := tmp[15:0]
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMULLW" form="ymm {z}, ymm, ymm" xed="VPMULLW_YMMu16_MASKmskw_YMMu16_YMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_mullo_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__m512i" varname="src" etype="UI16"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI16"/>
+ <parameter type="__m512i" varname="b" etype="UI16"/>
+ <description>Multiply the packed 16-bit integers in "a" and "b", producing intermediate 32-bit integers, and store the low 16 bits of the intermediate integers in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ IF k[j]
+ tmp[31:0] := SignExtend32(a[i+15:i]) * SignExtend32(b[i+15:i])
+ dst[i+15:i] := tmp[15:0]
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMULLW" form="zmm {k}, zmm, zmm" xed="VPMULLW_ZMMu16_MASKmskw_ZMMu16_ZMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_mullo_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI16"/>
+ <parameter type="__m512i" varname="b" etype="UI16"/>
+ <description>Multiply the packed 16-bit integers in "a" and "b", producing intermediate 32-bit integers, and store the low 16 bits of the intermediate integers in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ IF k[j]
+ tmp[31:0] := SignExtend32(a[i+15:i]) * SignExtend32(b[i+15:i])
+ dst[i+15:i] := tmp[15:0]
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMULLW" form="zmm {z}, zmm, zmm" xed="VPMULLW_ZMMu16_MASKmskw_ZMMu16_ZMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mullo_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__m512i" varname="a" etype="UI16"/>
+ <parameter type="__m512i" varname="b" etype="UI16"/>
+ <description>Multiply the packed 16-bit integers in "a" and "b", producing intermediate 32-bit integers, and store the low 16 bits of the intermediate integers in "dst".</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ tmp[31:0] := SignExtend32(a[i+15:i]) * SignExtend32(b[i+15:i])
+ dst[i+15:i] := tmp[15:0]
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMULLW" form="zmm, zmm, zmm" xed="VPMULLW_ZMMu16_MASKmskw_ZMMu16_ZMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_mullo_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__m128i" varname="src" etype="UI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <parameter type="__m128i" varname="b" etype="UI16"/>
+ <description>Multiply the packed 16-bit integers in "a" and "b", producing intermediate 32-bit integers, and store the low 16 bits of the intermediate integers in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ IF k[j]
+ tmp[31:0] := SignExtend32(a[i+15:i]) * SignExtend32(b[i+15:i])
+ dst[i+15:i] := tmp[15:0]
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMULLW" form="xmm {k}, xmm, xmm" xed="VPMULLW_XMMu16_MASKmskw_XMMu16_XMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_mullo_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <parameter type="__m128i" varname="b" etype="UI16"/>
+ <description>Multiply the packed 16-bit integers in "a" and "b", producing intermediate 32-bit integers, and store the low 16 bits of the intermediate integers in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ IF k[j]
+ tmp[31:0] := SignExtend32(a[i+15:i]) * SignExtend32(b[i+15:i])
+ dst[i+15:i] := tmp[15:0]
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMULLW" form="xmm {z}, xmm, xmm" xed="VPMULLW_XMMu16_MASKmskw_XMMu16_XMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_sad_epu8">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__m512i" varname="a" etype="UI8"/>
+ <parameter type="__m512i" varname="b" etype="UI8"/>
+ <description>Compute the absolute differences of packed unsigned 8-bit integers in "a" and "b", then horizontally sum each consecutive 8 differences to produce eight unsigned 16-bit integers, and pack these unsigned 16-bit integers in the low 16 bits of 64-bit elements in "dst".</description>
+ <operation>
+FOR j := 0 to 63
+ i := j*8
+ tmp[i+7:i] := ABS(a[i+7:i] - b[i+7:i])
+ENDFOR
+FOR j := 0 to 7
+ i := j*64
+ dst[i+15:i] := tmp[i+7:i] + tmp[i+15:i+8] + tmp[i+23:i+16] + tmp[i+31:i+24] + \
+ tmp[i+39:i+32] + tmp[i+47:i+40] + tmp[i+55:i+48] + tmp[i+63:i+56]
+ dst[i+63:i+16] := 0
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSADBW" form="zmm, zmm, zmm" xed="VPSADBW_ZMMu16_ZMMu8_ZMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_shuffle_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256i" varname="dst" etype="UI8"/>
+ <parameter type="__m256i" varname="src" etype="UI8"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI8"/>
+ <parameter type="__m256i" varname="b" etype="UI8"/>
+ <description>Shuffle packed 8-bit integers in "a" according to shuffle control mask in the corresponding 8-bit element of "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*8
+ IF k[j]
+ IF b[i+7] == 1
+ dst[i+7:i] := 0
+ ELSE
+ index[4:0] := b[i+3:i] + (j &amp; 0x10)
+ dst[i+7:i] := a[index*8+7:index*8]
+ FI
+ ELSE
+ dst[i+7:i] := src[i+7:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSHUFB" form="ymm {k}, ymm, ymm" xed="VPSHUFB_YMMu8_MASKmskw_YMMu8_YMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_shuffle_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256i" varname="dst" etype="UI8"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI8"/>
+ <parameter type="__m256i" varname="b" etype="UI8"/>
+ <description>Shuffle packed 8-bit integers in "a" according to shuffle control mask in the corresponding 8-bit element of "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*8
+ IF k[j]
+ IF b[i+7] == 1
+ dst[i+7:i] := 0
+ ELSE
+ index[4:0] := b[i+3:i] + (j &amp; 0x10)
+ dst[i+7:i] := a[index*8+7:index*8]
+ FI
+ ELSE
+ dst[i+7:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSHUFB" form="ymm {z}, ymm, ymm" xed="VPSHUFB_YMMu8_MASKmskw_YMMu8_YMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_shuffle_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512i" varname="dst" etype="UI8"/>
+ <parameter type="__m512i" varname="src" etype="UI8"/>
+ <parameter type="__mmask64" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI8"/>
+ <parameter type="__m512i" varname="b" etype="UI8"/>
+ <description>Shuffle 8-bit integers in "a" within 128-bit lanes using the control in the corresponding 8-bit element of "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 63
+ i := j*8
+ IF k[j]
+ IF b[i+7] == 1
+ dst[i+7:i] := 0
+ ELSE
+ index[5:0] := b[i+3:i] + (j &amp; 0x30)
+ dst[i+7:i] := a[index*8+7:index*8]
+ FI
+ ELSE
+ dst[i+7:i] := src[i+7:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSHUFB" form="zmm {k}, zmm, zmm" xed="VPSHUFB_ZMMu8_MASKmskw_ZMMu8_ZMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_shuffle_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512i" varname="dst" etype="UI8"/>
+ <parameter type="__mmask64" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI8"/>
+ <parameter type="__m512i" varname="b" etype="UI8"/>
+ <description>Shuffle packed 8-bit integers in "a" according to shuffle control mask in the corresponding 8-bit element of "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 63
+ i := j*8
+ IF k[j]
+ IF b[i+7] == 1
+ dst[i+7:i] := 0
+ ELSE
+ index[5:0] := b[i+3:i] + (j &amp; 0x30)
+ dst[i+7:i] := a[index*8+7:index*8]
+ FI
+ ELSE
+ dst[i+7:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSHUFB" form="zmm {z}, zmm, zmm" xed="VPSHUFB_ZMMu8_MASKmskw_ZMMu8_ZMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_shuffle_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512i" varname="dst" etype="UI8"/>
+ <parameter type="__m512i" varname="a" etype="UI8"/>
+ <parameter type="__m512i" varname="b" etype="UI8"/>
+ <description>Shuffle packed 8-bit integers in "a" according to shuffle control mask in the corresponding 8-bit element of "b", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 63
+ i := j*8
+ IF b[i+7] == 1
+ dst[i+7:i] := 0
+ ELSE
+ index[5:0] := b[i+3:i] + (j &amp; 0x30)
+ dst[i+7:i] := a[index*8+7:index*8]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSHUFB" form="zmm, zmm, zmm" xed="VPSHUFB_ZMMu8_MASKmskw_ZMMu8_ZMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_shuffle_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__m128i" varname="src" etype="UI8"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
+ <parameter type="__m128i" varname="b" etype="UI8"/>
+ <description>Shuffle packed 8-bit integers in "a" according to shuffle control mask in the corresponding 8-bit element of "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*8
+ IF k[j]
+ IF b[i+7] == 1
+ dst[i+7:i] := 0
+ ELSE
+ index[3:0] := b[i+3:i]
+ dst[i+7:i] := a[index*8+7:index*8]
+ FI
+ ELSE
+ dst[i+7:i] := src[i+7:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPSHUFB" form="xmm {k}, xmm, xmm" xed="VPSHUFB_XMMu8_MASKmskw_XMMu8_XMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_shuffle_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
+ <parameter type="__m128i" varname="b" etype="UI8"/>
+ <description>Shuffle packed 8-bit integers in "a" according to shuffle control mask in the corresponding 8-bit element of "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*8
+ IF k[j]
+ IF b[i+7] == 1
+ dst[i+7:i] := 0
+ ELSE
+ index[3:0] := b[i+3:i]
+ dst[i+7:i] := a[index*8+7:index*8]
+ FI
+ ELSE
+ dst[i+7:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPSHUFB" form="xmm {z}, xmm, xmm" xed="VPSHUFB_XMMu8_MASKmskw_XMMu8_XMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_shufflehi_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__m256i" varname="src" etype="UI16"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI16"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shuffle 16-bit integers in the high 64 bits of 128-bit lanes of "a" using the control in "imm8". Store the results in the high 64 bits of 128-bit lanes of "dst", with the low 64 bits of 128-bit lanes being copied from "a" to "dst", using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+tmp_dst[63:0] := a[63:0]
+tmp_dst[79:64] := (a &gt;&gt; (imm8[1:0] * 16))[79:64]
+tmp_dst[95:80] := (a &gt;&gt; (imm8[3:2] * 16))[79:64]
+tmp_dst[111:96] := (a &gt;&gt; (imm8[5:4] * 16))[79:64]
+tmp_dst[127:112] := (a &gt;&gt; (imm8[7:6] * 16))[79:64]
+tmp_dst[191:128] := a[191:128]
+tmp_dst[207:192] := (a &gt;&gt; (imm8[1:0] * 16))[207:192]
+tmp_dst[223:208] := (a &gt;&gt; (imm8[3:2] * 16))[207:192]
+tmp_dst[239:224] := (a &gt;&gt; (imm8[5:4] * 16))[207:192]
+tmp_dst[255:240] := (a &gt;&gt; (imm8[7:6] * 16))[207:192]
+FOR j := 0 to 15
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := tmp_dst[i+15:i]
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSHUFHW" form="ymm {k}, ymm, imm8" xed="VPSHUFHW_YMMu16_MASKmskw_YMMu16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_shufflehi_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI16"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shuffle 16-bit integers in the high 64 bits of 128-bit lanes of "a" using the control in "imm8". Store the results in the high 64 bits of 128-bit lanes of "dst", with the low 64 bits of 128-bit lanes being copied from "a" to "dst", using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+tmp_dst[63:0] := a[63:0]
+tmp_dst[79:64] := (a &gt;&gt; (imm8[1:0] * 16))[79:64]
+tmp_dst[95:80] := (a &gt;&gt; (imm8[3:2] * 16))[79:64]
+tmp_dst[111:96] := (a &gt;&gt; (imm8[5:4] * 16))[79:64]
+tmp_dst[127:112] := (a &gt;&gt; (imm8[7:6] * 16))[79:64]
+tmp_dst[191:128] := a[191:128]
+tmp_dst[207:192] := (a &gt;&gt; (imm8[1:0] * 16))[207:192]
+tmp_dst[223:208] := (a &gt;&gt; (imm8[3:2] * 16))[207:192]
+tmp_dst[239:224] := (a &gt;&gt; (imm8[5:4] * 16))[207:192]
+tmp_dst[255:240] := (a &gt;&gt; (imm8[7:6] * 16))[207:192]
+FOR j := 0 to 15
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := tmp_dst[i+15:i]
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSHUFHW" form="ymm {z}, ymm, imm8" xed="VPSHUFHW_YMMu16_MASKmskw_YMMu16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_shufflehi_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__m512i" varname="src" etype="UI16"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI16"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shuffle 16-bit integers in the high 64 bits of 128-bit lanes of "a" using the control in "imm8". Store the results in the high 64 bits of 128-bit lanes of "dst", with the low 64 bits of 128-bit lanes being copied from "a" to "dst", using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+tmp_dst[63:0] := a[63:0]
+tmp_dst[79:64] := (a &gt;&gt; (imm8[1:0] * 16))[79:64]
+tmp_dst[95:80] := (a &gt;&gt; (imm8[3:2] * 16))[79:64]
+tmp_dst[111:96] := (a &gt;&gt; (imm8[5:4] * 16))[79:64]
+tmp_dst[127:112] := (a &gt;&gt; (imm8[7:6] * 16))[79:64]
+tmp_dst[191:128] := a[191:128]
+tmp_dst[207:192] := (a &gt;&gt; (imm8[1:0] * 16))[207:192]
+tmp_dst[223:208] := (a &gt;&gt; (imm8[3:2] * 16))[207:192]
+tmp_dst[239:224] := (a &gt;&gt; (imm8[5:4] * 16))[207:192]
+tmp_dst[255:240] := (a &gt;&gt; (imm8[7:6] * 16))[207:192]
+tmp_dst[319:256] := a[319:256]
+tmp_dst[335:320] := (a &gt;&gt; (imm8[1:0] * 16))[335:320]
+tmp_dst[351:336] := (a &gt;&gt; (imm8[3:2] * 16))[335:320]
+tmp_dst[367:352] := (a &gt;&gt; (imm8[5:4] * 16))[335:320]
+tmp_dst[383:368] := (a &gt;&gt; (imm8[7:6] * 16))[335:320]
+tmp_dst[447:384] := a[447:384]
+tmp_dst[463:448] := (a &gt;&gt; (imm8[1:0] * 16))[463:448]
+tmp_dst[479:464] := (a &gt;&gt; (imm8[3:2] * 16))[463:448]
+tmp_dst[495:480] := (a &gt;&gt; (imm8[5:4] * 16))[463:448]
+tmp_dst[511:496] := (a &gt;&gt; (imm8[7:6] * 16))[463:448]
+FOR j := 0 to 31
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := tmp_dst[i+15:i]
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSHUFHW" form="zmm {k}, zmm, imm8" xed="VPSHUFHW_ZMMu16_MASKmskw_ZMMu16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_shufflehi_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI16"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shuffle 16-bit integers in the high 64 bits of 128-bit lanes of "a" using the control in "imm8". Store the results in the high 64 bits of 128-bit lanes of "dst", with the low 64 bits of 128-bit lanes being copied from "a" to "dst", using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+tmp_dst[63:0] := a[63:0]
+tmp_dst[79:64] := (a &gt;&gt; (imm8[1:0] * 16))[79:64]
+tmp_dst[95:80] := (a &gt;&gt; (imm8[3:2] * 16))[79:64]
+tmp_dst[111:96] := (a &gt;&gt; (imm8[5:4] * 16))[79:64]
+tmp_dst[127:112] := (a &gt;&gt; (imm8[7:6] * 16))[79:64]
+tmp_dst[191:128] := a[191:128]
+tmp_dst[207:192] := (a &gt;&gt; (imm8[1:0] * 16))[207:192]
+tmp_dst[223:208] := (a &gt;&gt; (imm8[3:2] * 16))[207:192]
+tmp_dst[239:224] := (a &gt;&gt; (imm8[5:4] * 16))[207:192]
+tmp_dst[255:240] := (a &gt;&gt; (imm8[7:6] * 16))[207:192]
+tmp_dst[319:256] := a[319:256]
+tmp_dst[335:320] := (a &gt;&gt; (imm8[1:0] * 16))[335:320]
+tmp_dst[351:336] := (a &gt;&gt; (imm8[3:2] * 16))[335:320]
+tmp_dst[367:352] := (a &gt;&gt; (imm8[5:4] * 16))[335:320]
+tmp_dst[383:368] := (a &gt;&gt; (imm8[7:6] * 16))[335:320]
+tmp_dst[447:384] := a[447:384]
+tmp_dst[463:448] := (a &gt;&gt; (imm8[1:0] * 16))[463:448]
+tmp_dst[479:464] := (a &gt;&gt; (imm8[3:2] * 16))[463:448]
+tmp_dst[495:480] := (a &gt;&gt; (imm8[5:4] * 16))[463:448]
+tmp_dst[511:496] := (a &gt;&gt; (imm8[7:6] * 16))[463:448]
+FOR j := 0 to 31
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := tmp_dst[i+15:i]
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSHUFHW" form="zmm {z}, zmm, imm8" xed="VPSHUFHW_ZMMu16_MASKmskw_ZMMu16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_shufflehi_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__m512i" varname="a" etype="UI16"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shuffle 16-bit integers in the high 64 bits of 128-bit lanes of "a" using the control in "imm8". Store the results in the high 64 bits of 128-bit lanes of "dst", with the low 64 bits of 128-bit lanes being copied from "a" to "dst".</description>
+ <operation>
+dst[63:0] := a[63:0]
+dst[79:64] := (a &gt;&gt; (imm8[1:0] * 16))[79:64]
+dst[95:80] := (a &gt;&gt; (imm8[3:2] * 16))[79:64]
+dst[111:96] := (a &gt;&gt; (imm8[5:4] * 16))[79:64]
+dst[127:112] := (a &gt;&gt; (imm8[7:6] * 16))[79:64]
+dst[191:128] := a[191:128]
+dst[207:192] := (a &gt;&gt; (imm8[1:0] * 16))[207:192]
+dst[223:208] := (a &gt;&gt; (imm8[3:2] * 16))[207:192]
+dst[239:224] := (a &gt;&gt; (imm8[5:4] * 16))[207:192]
+dst[255:240] := (a &gt;&gt; (imm8[7:6] * 16))[207:192]
+dst[319:256] := a[319:256]
+dst[335:320] := (a &gt;&gt; (imm8[1:0] * 16))[335:320]
+dst[351:336] := (a &gt;&gt; (imm8[3:2] * 16))[335:320]
+dst[367:352] := (a &gt;&gt; (imm8[5:4] * 16))[335:320]
+dst[383:368] := (a &gt;&gt; (imm8[7:6] * 16))[335:320]
+dst[447:384] := a[447:384]
+dst[463:448] := (a &gt;&gt; (imm8[1:0] * 16))[463:448]
+dst[479:464] := (a &gt;&gt; (imm8[3:2] * 16))[463:448]
+dst[495:480] := (a &gt;&gt; (imm8[5:4] * 16))[463:448]
+dst[511:496] := (a &gt;&gt; (imm8[7:6] * 16))[463:448]
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSHUFHW" form="zmm, zmm, imm8" xed="VPSHUFHW_ZMMu16_MASKmskw_ZMMu16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_shufflehi_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__m128i" varname="src" etype="UI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shuffle 16-bit integers in the high 64 bits of "a" using the control in "imm8". Store the results in the high 64 bits of "dst", with the low 64 bits being copied from "a" to "dst", using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+tmp_dst[63:0] := a[63:0]
+tmp_dst[79:64] := (a &gt;&gt; (imm8[1:0] * 16))[79:64]
+tmp_dst[95:80] := (a &gt;&gt; (imm8[3:2] * 16))[79:64]
+tmp_dst[111:96] := (a &gt;&gt; (imm8[5:4] * 16))[79:64]
+tmp_dst[127:112] := (a &gt;&gt; (imm8[7:6] * 16))[79:64]
+FOR j := 0 to 7
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := tmp_dst[i+15:i]
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPSHUFHW" form="xmm {k}, xmm, imm8" xed="VPSHUFHW_XMMu16_MASKmskw_XMMu16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_shufflehi_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shuffle 16-bit integers in the high 64 bits of "a" using the control in "imm8". Store the results in the high 64 bits of "dst", with the low 64 bits being copied from "a" to "dst", using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+tmp_dst[63:0] := a[63:0]
+tmp_dst[79:64] := (a &gt;&gt; (imm8[1:0] * 16))[79:64]
+tmp_dst[95:80] := (a &gt;&gt; (imm8[3:2] * 16))[79:64]
+tmp_dst[111:96] := (a &gt;&gt; (imm8[5:4] * 16))[79:64]
+tmp_dst[127:112] := (a &gt;&gt; (imm8[7:6] * 16))[79:64]
+FOR j := 0 to 7
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := tmp_dst[i+15:i]
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPSHUFHW" form="xmm {z}, xmm, imm8" xed="VPSHUFHW_XMMu16_MASKmskw_XMMu16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_shufflelo_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__m256i" varname="src" etype="UI16"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI16"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shuffle 16-bit integers in the low 64 bits of 128-bit lanes of "a" using the control in "imm8". Store the results in the low 64 bits of 128-bit lanes of "dst", with the high 64 bits of 128-bit lanes being copied from "a" to "dst", using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+tmp_dst[15:0] := (a &gt;&gt; (imm8[1:0] * 16))[15:0]
+tmp_dst[31:16] := (a &gt;&gt; (imm8[3:2] * 16))[15:0]
+tmp_dst[47:32] := (a &gt;&gt; (imm8[5:4] * 16))[15:0]
+tmp_dst[63:48] := (a &gt;&gt; (imm8[7:6] * 16))[15:0]
+tmp_dst[127:64] := a[127:64]
+tmp_dst[143:128] := (a &gt;&gt; (imm8[1:0] * 16))[143:128]
+tmp_dst[159:144] := (a &gt;&gt; (imm8[3:2] * 16))[143:128]
+tmp_dst[175:160] := (a &gt;&gt; (imm8[5:4] * 16))[143:128]
+tmp_dst[191:176] := (a &gt;&gt; (imm8[7:6] * 16))[143:128]
+tmp_dst[255:192] := a[255:192]
+FOR j := 0 to 15
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := tmp_dst[i+15:i]
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSHUFLW" form="ymm {k}, ymm, imm8" xed="VPSHUFLW_YMMu16_MASKmskw_YMMu16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_shufflelo_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI16"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shuffle 16-bit integers in the low 64 bits of 128-bit lanes of "a" using the control in "imm8". Store the results in the low 64 bits of 128-bit lanes of "dst", with the high 64 bits of 128-bit lanes being copied from "a" to "dst", using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+tmp_dst[15:0] := (a &gt;&gt; (imm8[1:0] * 16))[15:0]
+tmp_dst[31:16] := (a &gt;&gt; (imm8[3:2] * 16))[15:0]
+tmp_dst[47:32] := (a &gt;&gt; (imm8[5:4] * 16))[15:0]
+tmp_dst[63:48] := (a &gt;&gt; (imm8[7:6] * 16))[15:0]
+tmp_dst[127:64] := a[127:64]
+tmp_dst[143:128] := (a &gt;&gt; (imm8[1:0] * 16))[143:128]
+tmp_dst[159:144] := (a &gt;&gt; (imm8[3:2] * 16))[143:128]
+tmp_dst[175:160] := (a &gt;&gt; (imm8[5:4] * 16))[143:128]
+tmp_dst[191:176] := (a &gt;&gt; (imm8[7:6] * 16))[143:128]
+tmp_dst[255:192] := a[255:192]
+FOR j := 0 to 15
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := tmp_dst[i+15:i]
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSHUFLW" form="ymm {z}, ymm, imm8" xed="VPSHUFLW_YMMu16_MASKmskw_YMMu16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_shufflelo_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__m512i" varname="src" etype="UI16"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI16"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shuffle 16-bit integers in the low 64 bits of 128-bit lanes of "a" using the control in "imm8". Store the results in the low 64 bits of 128-bit lanes of "dst", with the high 64 bits of 128-bit lanes being copied from "a" to "dst", using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+tmp_dst[15:0] := (a &gt;&gt; (imm8[1:0] * 16))[15:0]
+tmp_dst[31:16] := (a &gt;&gt; (imm8[3:2] * 16))[15:0]
+tmp_dst[47:32] := (a &gt;&gt; (imm8[5:4] * 16))[15:0]
+tmp_dst[63:48] := (a &gt;&gt; (imm8[7:6] * 16))[15:0]
+tmp_dst[127:64] := a[127:64]
+tmp_dst[143:128] := (a &gt;&gt; (imm8[1:0] * 16))[143:128]
+tmp_dst[159:144] := (a &gt;&gt; (imm8[3:2] * 16))[143:128]
+tmp_dst[175:160] := (a &gt;&gt; (imm8[5:4] * 16))[143:128]
+tmp_dst[191:176] := (a &gt;&gt; (imm8[7:6] * 16))[143:128]
+tmp_dst[255:192] := a[255:192]
+tmp_dst[271:256] := (a &gt;&gt; (imm8[1:0] * 16))[271:256]
+tmp_dst[287:272] := (a &gt;&gt; (imm8[3:2] * 16))[271:256]
+tmp_dst[303:288] := (a &gt;&gt; (imm8[5:4] * 16))[271:256]
+tmp_dst[319:304] := (a &gt;&gt; (imm8[7:6] * 16))[271:256]
+tmp_dst[383:320] := a[383:320]
+tmp_dst[399:384] := (a &gt;&gt; (imm8[1:0] * 16))[399:384]
+tmp_dst[415:400] := (a &gt;&gt; (imm8[3:2] * 16))[399:384]
+tmp_dst[431:416] := (a &gt;&gt; (imm8[5:4] * 16))[399:384]
+tmp_dst[447:432] := (a &gt;&gt; (imm8[7:6] * 16))[399:384]
+tmp_dst[511:448] := a[511:448]
+FOR j := 0 to 31
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := tmp_dst[i+15:i]
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSHUFLW" form="zmm {k}, zmm, imm8" xed="VPSHUFLW_ZMMu16_MASKmskw_ZMMu16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_shufflelo_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI16"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shuffle 16-bit integers in the low 64 bits of 128-bit lanes of "a" using the control in "imm8". Store the results in the low 64 bits of 128-bit lanes of "dst", with the high 64 bits of 128-bit lanes being copied from "a" to "dst", using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+tmp_dst[15:0] := (a &gt;&gt; (imm8[1:0] * 16))[15:0]
+tmp_dst[31:16] := (a &gt;&gt; (imm8[3:2] * 16))[15:0]
+tmp_dst[47:32] := (a &gt;&gt; (imm8[5:4] * 16))[15:0]
+tmp_dst[63:48] := (a &gt;&gt; (imm8[7:6] * 16))[15:0]
+tmp_dst[127:64] := a[127:64]
+tmp_dst[143:128] := (a &gt;&gt; (imm8[1:0] * 16))[143:128]
+tmp_dst[159:144] := (a &gt;&gt; (imm8[3:2] * 16))[143:128]
+tmp_dst[175:160] := (a &gt;&gt; (imm8[5:4] * 16))[143:128]
+tmp_dst[191:176] := (a &gt;&gt; (imm8[7:6] * 16))[143:128]
+tmp_dst[255:192] := a[255:192]
+tmp_dst[271:256] := (a &gt;&gt; (imm8[1:0] * 16))[271:256]
+tmp_dst[287:272] := (a &gt;&gt; (imm8[3:2] * 16))[271:256]
+tmp_dst[303:288] := (a &gt;&gt; (imm8[5:4] * 16))[271:256]
+tmp_dst[319:304] := (a &gt;&gt; (imm8[7:6] * 16))[271:256]
+tmp_dst[383:320] := a[383:320]
+tmp_dst[399:384] := (a &gt;&gt; (imm8[1:0] * 16))[399:384]
+tmp_dst[415:400] := (a &gt;&gt; (imm8[3:2] * 16))[399:384]
+tmp_dst[431:416] := (a &gt;&gt; (imm8[5:4] * 16))[399:384]
+tmp_dst[447:432] := (a &gt;&gt; (imm8[7:6] * 16))[399:384]
+tmp_dst[511:448] := a[511:448]
+FOR j := 0 to 31
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := tmp_dst[i+15:i]
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSHUFLW" form="zmm {z}, zmm, imm8" xed="VPSHUFLW_ZMMu16_MASKmskw_ZMMu16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_shufflelo_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__m512i" varname="a" etype="UI16"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shuffle 16-bit integers in the low 64 bits of 128-bit lanes of "a" using the control in "imm8". Store the results in the low 64 bits of 128-bit lanes of "dst", with the high 64 bits of 128-bit lanes being copied from "a" to "dst".</description>
+ <operation>
+dst[15:0] := (a &gt;&gt; (imm8[1:0] * 16))[15:0]
+dst[31:16] := (a &gt;&gt; (imm8[3:2] * 16))[15:0]
+dst[47:32] := (a &gt;&gt; (imm8[5:4] * 16))[15:0]
+dst[63:48] := (a &gt;&gt; (imm8[7:6] * 16))[15:0]
+dst[127:64] := a[127:64]
+dst[143:128] := (a &gt;&gt; (imm8[1:0] * 16))[143:128]
+dst[159:144] := (a &gt;&gt; (imm8[3:2] * 16))[143:128]
+dst[175:160] := (a &gt;&gt; (imm8[5:4] * 16))[143:128]
+dst[191:176] := (a &gt;&gt; (imm8[7:6] * 16))[143:128]
+dst[255:192] := a[255:192]
+dst[271:256] := (a &gt;&gt; (imm8[1:0] * 16))[271:256]
+dst[287:272] := (a &gt;&gt; (imm8[3:2] * 16))[271:256]
+dst[303:288] := (a &gt;&gt; (imm8[5:4] * 16))[271:256]
+dst[319:304] := (a &gt;&gt; (imm8[7:6] * 16))[271:256]
+dst[383:320] := a[383:320]
+dst[399:384] := (a &gt;&gt; (imm8[1:0] * 16))[399:384]
+dst[415:400] := (a &gt;&gt; (imm8[3:2] * 16))[399:384]
+dst[431:416] := (a &gt;&gt; (imm8[5:4] * 16))[399:384]
+dst[447:432] := (a &gt;&gt; (imm8[7:6] * 16))[399:384]
+dst[511:448] := a[511:448]
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSHUFLW" form="zmm, zmm, imm8" xed="VPSHUFLW_ZMMu16_MASKmskw_ZMMu16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_shufflelo_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__m128i" varname="src" etype="UI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shuffle 16-bit integers in the low 64 bits of "a" using the control in "imm8". Store the results in the low 64 bits of "dst", with the high 64 bits being copied from "a" to "dst", using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+tmp_dst[15:0] := (a &gt;&gt; (imm8[1:0] * 16))[15:0]
+tmp_dst[31:16] := (a &gt;&gt; (imm8[3:2] * 16))[15:0]
+tmp_dst[47:32] := (a &gt;&gt; (imm8[5:4] * 16))[15:0]
+tmp_dst[63:48] := (a &gt;&gt; (imm8[7:6] * 16))[15:0]
+tmp_dst[127:64] := a[127:64]
+FOR j := 0 to 7
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := tmp_dst[i+15:i]
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPSHUFLW" form="xmm {k}, xmm, imm8" xed="VPSHUFLW_XMMu16_MASKmskw_XMMu16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_shufflelo_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shuffle 16-bit integers in the low 64 bits of "a" using the control in "imm8". Store the results in the low 64 bits of "dst", with the high 64 bits being copied from "a" to "dst", using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+tmp_dst[15:0] := (a &gt;&gt; (imm8[1:0] * 16))[15:0]
+tmp_dst[31:16] := (a &gt;&gt; (imm8[3:2] * 16))[15:0]
+tmp_dst[47:32] := (a &gt;&gt; (imm8[5:4] * 16))[15:0]
+tmp_dst[63:48] := (a &gt;&gt; (imm8[7:6] * 16))[15:0]
+tmp_dst[127:64] := a[127:64]
+FOR j := 0 to 7
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := tmp_dst[i+15:i]
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPSHUFLW" form="xmm {z}, xmm, imm8" xed="VPSHUFLW_XMMu16_MASKmskw_XMMu16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_bslli_epi128">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="M128"/>
+ <parameter type="__m512i" varname="a" etype="M128"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shift 128-bit lanes in "a" left by "imm8" bytes while shifting in zeros, and store the results in "dst".</description>
+ <operation>
+tmp := imm8[7:0]
+IF tmp &gt; 15
+ tmp := 16
+FI
+dst[127:0] := a[127:0] &lt;&lt; (tmp*8)
+dst[255:128] := a[255:128] &lt;&lt; (tmp*8)
+dst[383:256] := a[383:256] &lt;&lt; (tmp*8)
+dst[511:384] := a[511:384] &lt;&lt; (tmp*8)
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSLLDQ" form="zmm, zmm, imm8" xed="VPSLLDQ_ZMMu8_ZMMu8_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_sllv_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__m256i" varname="src" etype="UI16"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI16"/>
+ <parameter type="__m256i" varname="count" etype="UI16"/>
+ <description>Shift packed 16-bit integers in "a" left by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ IF k[j]
+ IF count[i+15:i] &lt; 16
+ dst[i+15:i] := ZeroExtend16(a[i+15:i] &lt;&lt; count[i+15:i])
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSLLVW" form="ymm {k}, ymm, ymm" xed="VPSLLVW_YMMu16_MASKmskw_YMMu16_YMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_sllv_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI16"/>
+ <parameter type="__m256i" varname="count" etype="UI16"/>
+ <description>Shift packed 16-bit integers in "a" left by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ IF k[j]
+ IF count[i+15:i] &lt; 16
+ dst[i+15:i] := ZeroExtend16(a[i+15:i] &lt;&lt; count[i+15:i])
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSLLVW" form="ymm {z}, ymm, ymm" xed="VPSLLVW_YMMu16_MASKmskw_YMMu16_YMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_sllv_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__m256i" varname="a" etype="UI16"/>
+ <parameter type="__m256i" varname="count" etype="UI16"/>
+ <description>Shift packed 16-bit integers in "a" left by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ IF count[i+15:i] &lt; 16
+ dst[i+15:i] := ZeroExtend16(a[i+15:i] &lt;&lt; count[i+15:i])
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSLLVW" form="ymm, ymm, ymm" xed="VPSLLVW_YMMu16_MASKmskw_YMMu16_YMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_sllv_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__m512i" varname="src" etype="UI16"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI16"/>
+ <parameter type="__m512i" varname="count" etype="UI16"/>
+ <description>Shift packed 16-bit integers in "a" left by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ IF k[j]
+ IF count[i+15:i] &lt; 16
+ dst[i+15:i] := ZeroExtend16(a[i+15:i] &lt;&lt; count[i+15:i])
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSLLVW" form="zmm {k}, zmm, zmm" xed="VPSLLVW_ZMMu16_MASKmskw_ZMMu16_ZMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_sllv_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI16"/>
+ <parameter type="__m512i" varname="count" etype="UI16"/>
+ <description>Shift packed 16-bit integers in "a" left by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ IF k[j]
+ IF count[i+15:i] &lt; 16
+ dst[i+15:i] := ZeroExtend16(a[i+15:i] &lt;&lt; count[i+15:i])
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSLLVW" form="zmm {z}, zmm, zmm" xed="VPSLLVW_ZMMu16_MASKmskw_ZMMu16_ZMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_sllv_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__m512i" varname="a" etype="UI16"/>
+ <parameter type="__m512i" varname="count" etype="UI16"/>
+ <description>Shift packed 16-bit integers in "a" left by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ IF count[i+15:i] &lt; 16
+ dst[i+15:i] := ZeroExtend16(a[i+15:i] &lt;&lt; count[i+15:i])
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSLLVW" form="zmm, zmm, zmm" xed="VPSLLVW_ZMMu16_MASKmskw_ZMMu16_ZMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_sllv_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__m128i" varname="src" etype="UI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <parameter type="__m128i" varname="count" etype="UI16"/>
+ <description>Shift packed 16-bit integers in "a" left by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ IF k[j]
+ IF count[i+15:i] &lt; 16
+ dst[i+15:i] := ZeroExtend16(a[i+15:i] &lt;&lt; count[i+15:i])
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPSLLVW" form="xmm {k}, xmm, xmm" xed="VPSLLVW_XMMu16_MASKmskw_XMMu16_XMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_sllv_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <parameter type="__m128i" varname="count" etype="UI16"/>
+ <description>Shift packed 16-bit integers in "a" left by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ IF k[j]
+ IF count[i+15:i] &lt; 16
+ dst[i+15:i] := ZeroExtend16(a[i+15:i] &lt;&lt; count[i+15:i])
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPSLLVW" form="xmm {z}, xmm, xmm" xed="VPSLLVW_XMMu16_MASKmskw_XMMu16_XMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_sllv_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <parameter type="__m128i" varname="count" etype="UI16"/>
+ <description>Shift packed 16-bit integers in "a" left by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ IF count[i+15:i] &lt; 16
+ dst[i+15:i] := ZeroExtend16(a[i+15:i] &lt;&lt; count[i+15:i])
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPSLLVW" form="xmm, xmm, xmm" xed="VPSLLVW_XMMu16_MASKmskw_XMMu16_XMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_sll_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__m256i" varname="src" etype="UI16"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI16"/>
+ <parameter type="__m128i" varname="count" etype="UI16"/>
+ <description>Shift packed 16-bit integers in "a" left by "count" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ IF k[j]
+ IF count[63:0] &gt; 15
+ dst[i+15:i] := 0
+ ELSE
+ dst[i+15:i] := ZeroExtend16(a[i+15:i] &lt;&lt; count[63:0])
+ FI
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSLLW" form="ymm {k}, ymm, xmm" xed="VPSLLW_YMMu16_MASKmskw_YMMu16_XMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_slli_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__m256i" varname="src" etype="UI16"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI16"/>
+ <parameter type="unsigned int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shift packed 16-bit integers in "a" left by "imm8" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ IF k[j]
+ IF imm8[7:0] &gt; 15
+ dst[i+15:i] := 0
+ ELSE
+ dst[i+15:i] := ZeroExtend16(a[i+15:i] &lt;&lt; imm8[7:0])
+ FI
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSLLW" form="ymm {k}, ymm, imm8" xed="VPSLLW_YMMu16_MASKmskw_YMMu16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_sll_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI16"/>
+ <parameter type="__m128i" varname="count" etype="UI16"/>
+ <description>Shift packed 16-bit integers in "a" left by "count" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ IF k[j]
+ IF count[63:0] &gt; 15
+ dst[i+15:i] := 0
+ ELSE
+ dst[i+15:i] := ZeroExtend16(a[i+15:i] &lt;&lt; count[63:0])
+ FI
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSLLW" form="ymm {z}, ymm, xmm" xed="VPSLLW_YMMu16_MASKmskw_YMMu16_XMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_slli_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI16"/>
+ <parameter type="unsigned int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shift packed 16-bit integers in "a" left by "imm8" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ IF k[j]
+ IF imm8[7:0] &gt; 15
+ dst[i+15:i] := 0
+ ELSE
+ dst[i+15:i] := ZeroExtend16(a[i+15:i] &lt;&lt; imm8[7:0])
+ FI
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSLLW" form="ymm {z}, ymm, imm8" xed="VPSLLW_YMMu16_MASKmskw_YMMu16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_sll_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__m512i" varname="src" etype="UI16"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI16"/>
+ <parameter type="__m128i" varname="count" etype="UI16"/>
+ <description>Shift packed 16-bit integers in "a" left by "count" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ IF k[j]
+ IF count[63:0] &gt; 15
+ dst[i+15:i] := 0
+ ELSE
+ dst[i+15:i] := ZeroExtend16(a[i+15:i] &lt;&lt; count[63:0])
+ FI
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSLLW" form="zmm {k}, zmm, xmm" xed="VPSLLW_ZMMu16_MASKmskw_ZMMu16_XMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_slli_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__m512i" varname="src" etype="UI16"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI16"/>
+ <parameter type="unsigned int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shift packed 16-bit integers in "a" left by "imm8" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ IF k[j]
+ IF imm8[7:0] &gt; 15
+ dst[i+15:i] := 0
+ ELSE
+ dst[i+15:i] := ZeroExtend16(a[i+15:i] &lt;&lt; imm8[7:0])
+ FI
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSLLW" form="zmm {k}, zmm, imm8" xed="VPSLLW_ZMMu16_MASKmskw_ZMMu16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_sll_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI16"/>
+ <parameter type="__m128i" varname="count" etype="UI16"/>
+ <description>Shift packed 16-bit integers in "a" left by "count" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ IF k[j]
+ IF count[63:0] &gt; 15
+ dst[i+15:i] := 0
+ ELSE
+ dst[i+15:i] := ZeroExtend16(a[i+15:i] &lt;&lt; count[63:0])
+ FI
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSLLW" form="zmm {z}, zmm, xmm" xed="VPSLLW_ZMMu16_MASKmskw_ZMMu16_XMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_slli_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI16"/>
+ <parameter type="unsigned int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shift packed 16-bit integers in "a" left by "imm8" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ IF k[j]
+ IF imm8[7:0] &gt; 15
+ dst[i+15:i] := 0
+ ELSE
+ dst[i+15:i] := ZeroExtend16(a[i+15:i] &lt;&lt; imm8[7:0])
+ FI
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSLLW" form="zmm {z}, zmm, imm8" xed="VPSLLW_ZMMu16_MASKmskw_ZMMu16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_sll_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__m512i" varname="a" etype="UI16"/>
+ <parameter type="__m128i" varname="count" etype="UI16"/>
+ <description>Shift packed 16-bit integers in "a" left by "count" while shifting in zeros, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ IF count[63:0] &gt; 15
+ dst[i+15:i] := 0
+ ELSE
+ dst[i+15:i] := ZeroExtend16(a[i+15:i] &lt;&lt; count[63:0])
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSLLW" form="zmm, zmm, xmm" xed="VPSLLW_ZMMu16_MASKmskw_ZMMu16_XMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_slli_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__m512i" varname="a" etype="UI16"/>
+ <parameter type="unsigned int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shift packed 16-bit integers in "a" left by "imm8" while shifting in zeros, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ IF imm8[7:0] &gt; 15
+ dst[i+15:i] := 0
+ ELSE
+ dst[i+15:i] := ZeroExtend16(a[i+15:i] &lt;&lt; imm8[7:0])
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSLLW" form="zmm, zmm, imm8" xed="VPSLLW_ZMMu16_MASKmskw_ZMMu16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_sll_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__m128i" varname="src" etype="UI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <parameter type="__m128i" varname="count" etype="UI16"/>
+ <description>Shift packed 16-bit integers in "a" left by "count" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ IF k[j]
+ IF count[63:0] &gt; 15
+ dst[i+15:i] := 0
+ ELSE
+ dst[i+15:i] := ZeroExtend16(a[i+15:i] &lt;&lt; count[63:0])
+ FI
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPSLLW" form="xmm {k}, xmm, xmm" xed="VPSLLW_XMMu16_MASKmskw_XMMu16_XMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_slli_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__m128i" varname="src" etype="UI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <parameter type="unsigned int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shift packed 16-bit integers in "a" left by "imm8" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ IF k[j]
+ IF imm8[7:0] &gt; 15
+ dst[i+15:i] := 0
+ ELSE
+ dst[i+15:i] := ZeroExtend16(a[i+15:i] &lt;&lt; imm8[7:0])
+ FI
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPSLLW" form="xmm {k}, xmm, imm8" xed="VPSLLW_XMMu16_MASKmskw_XMMu16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_sll_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <parameter type="__m128i" varname="count" etype="UI16"/>
+ <description>Shift packed 16-bit integers in "a" left by "count" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ IF k[j]
+ IF count[63:0] &gt; 15
+ dst[i+15:i] := 0
+ ELSE
+ dst[i+15:i] := ZeroExtend16(a[i+15:i] &lt;&lt; count[63:0])
+ FI
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPSLLW" form="xmm {z}, xmm, xmm" xed="VPSLLW_XMMu16_MASKmskw_XMMu16_XMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_slli_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <parameter type="unsigned int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shift packed 16-bit integers in "a" left by "imm8" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ IF k[j]
+ IF imm8[7:0] &gt; 15
+ dst[i+15:i] := 0
+ ELSE
+ dst[i+15:i] := ZeroExtend16(a[i+15:i] &lt;&lt; imm8[7:0])
+ FI
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPSLLW" form="xmm {z}, xmm, imm8" xed="VPSLLW_XMMu16_MASKmskw_XMMu16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_srav_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__m256i" varname="src" etype="UI16"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI16"/>
+ <parameter type="__m256i" varname="count" etype="UI16"/>
+ <description>Shift packed 16-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in sign bits, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ IF k[j]
+ IF count[i+15:i] &lt; 16
+ dst[i+15:i] := SignExtend16(a[i+15:i] &gt;&gt; count[i+15:i])
+ ELSE
+ dst[i+15:i] := (a[i+15] ? 0xFFFF : 0)
+ FI
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSRAVW" form="ymm {k}, ymm, ymm" xed="VPSRAVW_YMMu16_MASKmskw_YMMu16_YMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_srav_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI16"/>
+ <parameter type="__m256i" varname="count" etype="UI16"/>
+ <description>Shift packed 16-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in sign bits, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ IF k[j]
+ IF count[i+15:i] &lt; 16
+ dst[i+15:i] := SignExtend16(a[i+15:i] &gt;&gt; count[i+15:i])
+ ELSE
+ dst[i+15:i] := (a[i+15] ? 0xFFFF : 0)
+ FI
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSRAVW" form="ymm {z}, ymm, ymm" xed="VPSRAVW_YMMu16_MASKmskw_YMMu16_YMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_srav_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__m256i" varname="a" etype="SI16"/>
+ <parameter type="__m256i" varname="count" etype="UI16"/>
+ <description>Shift packed 16-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in sign bits, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ IF count[i+15:i] &lt; 16
+ dst[i+15:i] := SignExtend16(a[i+15:i] &gt;&gt; count[i+15:i])
+ ELSE
+ dst[i+15:i] := (a[i+15] ? 0xFFFF : 0)
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSRAVW" form="ymm, ymm, ymm" xed="VPSRAVW_YMMu16_MASKmskw_YMMu16_YMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_srav_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__m512i" varname="src" etype="UI16"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI16"/>
+ <parameter type="__m512i" varname="count" etype="UI16"/>
+ <description>Shift packed 16-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in sign bits, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ IF k[j]
+ IF count[i+15:i] &lt; 16
+ dst[i+15:i] := SignExtend16(a[i+15:i] &gt;&gt; count[i+15:i])
+ ELSE
+ dst[i+15:i] := (a[i+15] ? 0xFFFF : 0)
+ FI
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSRAVW" form="zmm {k}, zmm, zmm" xed="VPSRAVW_ZMMu16_MASKmskw_ZMMu16_ZMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_srav_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI16"/>
+ <parameter type="__m512i" varname="count" etype="UI16"/>
+ <description>Shift packed 16-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in sign bits, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ IF k[j]
+ IF count[i+15:i] &lt; 16
+ dst[i+15:i] := SignExtend16(a[i+15:i] &gt;&gt; count[i+15:i])
+ ELSE
+ dst[i+15:i] := (a[i+15] ? 0xFFFF : 0)
+ FI
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSRAVW" form="zmm {z}, zmm, zmm" xed="VPSRAVW_ZMMu16_MASKmskw_ZMMu16_ZMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_srav_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__m512i" varname="a" etype="SI16"/>
+ <parameter type="__m512i" varname="count" etype="UI16"/>
+ <description>Shift packed 16-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in sign bits, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ IF count[i+15:i] &lt; 16
+ dst[i+15:i] := SignExtend16(a[i+15:i] &gt;&gt; count[i+15:i])
+ ELSE
+ dst[i+15:i] := (a[i+15] ? 0xFFFF : 0)
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSRAVW" form="zmm, zmm, zmm" xed="VPSRAVW_ZMMu16_MASKmskw_ZMMu16_ZMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_srav_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__m128i" varname="src" etype="UI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI16"/>
+ <parameter type="__m128i" varname="count" etype="UI16"/>
+ <description>Shift packed 16-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in sign bits, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ IF k[j]
+ IF count[i+15:i] &lt; 16
+ dst[i+15:i] := SignExtend16(a[i+15:i] &gt;&gt; count[i+15:i])
+ ELSE
+ dst[i+15:i] := (a[i+15] ? 0xFFFF : 0)
+ FI
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPSRAVW" form="xmm {k}, xmm, xmm" xed="VPSRAVW_XMMu16_MASKmskw_XMMu16_XMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_srav_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI16"/>
+ <parameter type="__m128i" varname="count" etype="UI16"/>
+ <description>Shift packed 16-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in sign bits, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ IF k[j]
+ IF count[i+15:i] &lt; 16
+ dst[i+15:i] := SignExtend16(a[i+15:i] &gt;&gt; count[i+15:i])
+ ELSE
+ dst[i+15:i] := (a[i+15] ? 0xFFFF : 0)
+ FI
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPSRAVW" form="xmm {z}, xmm, xmm" xed="VPSRAVW_XMMu16_MASKmskw_XMMu16_XMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_srav_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__m128i" varname="a" etype="SI16"/>
+ <parameter type="__m128i" varname="count" etype="UI16"/>
+ <description>Shift packed 16-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in sign bits, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ IF count[i+15:i] &lt; 16
+ dst[i+15:i] := SignExtend16(a[i+15:i] &gt;&gt; count[i+15:i])
+ ELSE
+ dst[i+15:i] := (a[i+15] ? 0xFFFF : 0)
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPSRAVW" form="xmm, xmm, xmm" xed="VPSRAVW_XMMu16_MASKmskw_XMMu16_XMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_sra_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__m256i" varname="src" etype="UI16"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI16"/>
+ <parameter type="__m128i" varname="count" etype="UI16"/>
+ <description>Shift packed 16-bit integers in "a" right by "count" while shifting in sign bits, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ IF k[j]
+ IF count[63:0] &gt; 15
+ dst[i+15:i] := (a[i+15] ? 0xFFFF : 0x0)
+ ELSE
+ dst[i+15:i] := SignExtend16(a[i+15:i] &gt;&gt; count[63:0])
+ FI
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSRAW" form="ymm {k}, ymm, xmm" xed="VPSRAW_YMMu16_MASKmskw_YMMu16_XMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_srai_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__m256i" varname="src" etype="UI16"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI16"/>
+ <parameter type="unsigned int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shift packed 16-bit integers in "a" right by "imm8" while shifting in sign bits, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ IF k[j]
+ IF imm8[7:0] &gt; 15
+ dst[i+15:i] := (a[i+15] ? 0xFFFF : 0x0)
+ ELSE
+ dst[i+15:i] := SignExtend16(a[i+15:i] &gt;&gt; imm8[7:0])
+ FI
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSRAW" form="ymm {k}, ymm, imm8" xed="VPSRAW_YMMu16_MASKmskw_YMMu16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_sra_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI16"/>
+ <parameter type="__m128i" varname="count" etype="UI16"/>
+ <description>Shift packed 16-bit integers in "a" right by "count" while shifting in sign bits, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ IF k[j]
+ IF count[63:0] &gt; 15
+ dst[i+15:i] := (a[i+15] ? 0xFFFF : 0x0)
+ ELSE
+ dst[i+15:i] := SignExtend16(a[i+15:i] &gt;&gt; count[63:0])
+ FI
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSRAW" form="ymm {z}, ymm, xmm" xed="VPSRAW_YMMu16_MASKmskw_YMMu16_XMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_srai_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI16"/>
+ <parameter type="unsigned int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shift packed 16-bit integers in "a" right by "imm8" while shifting in sign bits, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ IF k[j]
+ IF imm8[7:0] &gt; 15
+ dst[i+15:i] := (a[i+15] ? 0xFFFF : 0x0)
+ ELSE
+ dst[i+15:i] := SignExtend16(a[i+15:i] &gt;&gt; imm8[7:0])
+ FI
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSRAW" form="ymm {z}, ymm, imm8" xed="VPSRAW_YMMu16_MASKmskw_YMMu16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_sra_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__m512i" varname="src" etype="UI16"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI16"/>
+ <parameter type="__m128i" varname="count" etype="UI16"/>
+ <description>Shift packed 16-bit integers in "a" right by "count" while shifting in sign bits, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ IF k[j]
+ IF count[63:0] &gt; 15
+ dst[i+15:i] := (a[i+15] ? 0xFFFF : 0x0)
+ ELSE
+ dst[i+15:i] := SignExtend16(a[i+15:i] &gt;&gt; count[63:0])
+ FI
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSRAW" form="zmm {k}, zmm, xmm" xed="VPSRAW_ZMMu16_MASKmskw_ZMMu16_XMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_srai_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__m512i" varname="src" etype="UI16"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI16"/>
+ <parameter type="unsigned int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shift packed 16-bit integers in "a" right by "imm8" while shifting in sign bits, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ IF k[j]
+ IF imm8[7:0] &gt; 15
+ dst[i+15:i] := (a[i+15] ? 0xFFFF : 0x0)
+ ELSE
+ dst[i+15:i] := SignExtend16(a[i+15:i] &gt;&gt; imm8[7:0])
+ FI
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSRAW" form="zmm {k}, zmm, imm8" xed="VPSRAW_ZMMu16_MASKmskw_ZMMu16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_sra_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI16"/>
+ <parameter type="__m128i" varname="count" etype="UI16"/>
+ <description>Shift packed 16-bit integers in "a" right by "count" while shifting in sign bits, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ IF k[j]
+ IF count[63:0] &gt; 15
+ dst[i+15:i] := (a[i+15] ? 0xFFFF : 0x0)
+ ELSE
+ dst[i+15:i] := SignExtend16(a[i+15:i] &gt;&gt; count[63:0])
+ FI
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSRAW" form="zmm {z}, zmm, xmm" xed="VPSRAW_ZMMu16_MASKmskw_ZMMu16_XMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_srai_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI16"/>
+ <parameter type="unsigned int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shift packed 16-bit integers in "a" right by "imm8" while shifting in sign bits, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ IF k[j]
+ IF imm8[7:0] &gt; 15
+ dst[i+15:i] := (a[i+15] ? 0xFFFF : 0x0)
+ ELSE
+ dst[i+15:i] := SignExtend16(a[i+15:i] &gt;&gt; imm8[7:0])
+ FI
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSRAW" form="zmm {z}, zmm, imm8" xed="VPSRAW_ZMMu16_MASKmskw_ZMMu16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_sra_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__m512i" varname="a" etype="UI16"/>
+ <parameter type="__m128i" varname="count" etype="UI16"/>
+ <description>Shift packed 16-bit integers in "a" right by "count" while shifting in sign bits, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ IF count[63:0] &gt; 15
+ dst[i+15:i] := (a[i+15] ? 0xFFFF : 0x0)
+ ELSE
+ dst[i+15:i] := SignExtend16(a[i+15:i] &gt;&gt; count[63:0])
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSRAW" form="zmm, zmm, xmm" xed="VPSRAW_ZMMu16_MASKmskw_ZMMu16_XMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_srai_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__m512i" varname="a" etype="SI16"/>
+ <parameter type="unsigned int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shift packed 16-bit integers in "a" right by "imm8" while shifting in sign bits, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ IF imm8[7:0] &gt; 15
+ dst[i+15:i] := (a[i+15] ? 0xFFFF : 0x0)
+ ELSE
+ dst[i+15:i] := SignExtend16(a[i+15:i] &gt;&gt; imm8[7:0])
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSRAW" form="zmm, zmm, imm8" xed="VPSRAW_ZMMu16_MASKmskw_ZMMu16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_sra_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__m128i" varname="src" etype="UI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <parameter type="__m128i" varname="count" etype="UI16"/>
+ <description>Shift packed 16-bit integers in "a" right by "count" while shifting in sign bits, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ IF k[j]
+ IF count[63:0] &gt; 15
+ dst[i+15:i] := (a[i+15] ? 0xFFFF : 0x0)
+ ELSE
+ dst[i+15:i] := SignExtend16(a[i+15:i] &gt;&gt; count[63:0])
+ FI
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPSRAW" form="xmm {k}, xmm, xmm" xed="VPSRAW_XMMu16_MASKmskw_XMMu16_XMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_srai_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__m128i" varname="src" etype="UI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI16"/>
+ <parameter type="unsigned int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shift packed 16-bit integers in "a" right by "imm8" while shifting in sign bits, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ IF k[j]
+ IF imm8[7:0] &gt; 15
+ dst[i+15:i] := (a[i+15] ? 0xFFFF : 0x0)
+ ELSE
+ dst[i+15:i] := SignExtend16(a[i+15:i] &gt;&gt; imm8[7:0])
+ FI
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPSRAW" form="xmm {k}, xmm, imm8" xed="VPSRAW_XMMu16_MASKmskw_XMMu16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_sra_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <parameter type="__m128i" varname="count" etype="UI16"/>
+ <description>Shift packed 16-bit integers in "a" right by "count" while shifting in sign bits, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ IF k[j]
+ IF count[63:0] &gt; 15
+ dst[i+15:i] := (a[i+15] ? 0xFFFF : 0x0)
+ ELSE
+ dst[i+15:i] := SignExtend16(a[i+15:i] &gt;&gt; count[63:0])
+ FI
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPSRAW" form="xmm {z}, xmm, xmm" xed="VPSRAW_XMMu16_MASKmskw_XMMu16_XMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_srai_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI16"/>
+ <parameter type="unsigned int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shift packed 16-bit integers in "a" right by "imm8" while shifting in sign bits, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ IF k[j]
+ IF imm8[7:0] &gt; 15
+ dst[i+15:i] := (a[i+15] ? 0xFFFF : 0x0)
+ ELSE
+ dst[i+15:i] := SignExtend16(a[i+15:i] &gt;&gt; imm8[7:0])
+ FI
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPSRAW" form="xmm {z}, xmm, imm8" xed="VPSRAW_XMMu16_MASKmskw_XMMu16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_bsrli_epi128">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="M128"/>
+ <parameter type="__m512i" varname="a" etype="M128"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shift 128-bit lanes in "a" right by "imm8" bytes while shifting in zeros, and store the results in "dst".</description>
+ <operation>
+tmp := imm8[7:0]
+IF tmp &gt; 15
+ tmp := 16
+FI
+dst[127:0] := a[127:0] &gt;&gt; (tmp*8)
+dst[255:128] := a[255:128] &gt;&gt; (tmp*8)
+dst[383:256] := a[383:256] &gt;&gt; (tmp*8)
+dst[511:384] := a[511:384] &gt;&gt; (tmp*8)
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSRLDQ" form="zmm, zmm, imm8" xed="VPSRLDQ_ZMMu8_ZMMu8_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_srlv_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__m256i" varname="src" etype="UI16"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI16"/>
+ <parameter type="__m256i" varname="count" etype="UI16"/>
+ <description>Shift packed 16-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ IF k[j]
+ IF count[i+15:i] &lt; 16
+ dst[i+15:i] := ZeroExtend16(a[i+15:i] &gt;&gt; count[i+15:i])
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSRLVW" form="ymm {k}, ymm, ymm" xed="VPSRLVW_YMMu16_MASKmskw_YMMu16_YMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_srlv_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI16"/>
+ <parameter type="__m256i" varname="count" etype="UI16"/>
+ <description>Shift packed 16-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ IF k[j]
+ IF count[i+15:i] &lt; 16
+ dst[i+15:i] := ZeroExtend16(a[i+15:i] &gt;&gt; count[i+15:i])
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSRLVW" form="ymm {z}, ymm, ymm" xed="VPSRLVW_YMMu16_MASKmskw_YMMu16_YMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_srlv_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__m256i" varname="a" etype="UI16"/>
+ <parameter type="__m256i" varname="count" etype="UI16"/>
+ <description>Shift packed 16-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ IF count[i+15:i] &lt; 16
+ dst[i+15:i] := ZeroExtend16(a[i+15:i] &gt;&gt; count[i+15:i])
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSRLVW" form="ymm, ymm, ymm" xed="VPSRLVW_YMMu16_MASKmskw_YMMu16_YMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_srlv_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__m512i" varname="src" etype="UI16"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI16"/>
+ <parameter type="__m512i" varname="count" etype="UI16"/>
+ <description>Shift packed 16-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ IF k[j]
+ IF count[i+15:i] &lt; 16
+ dst[i+15:i] := ZeroExtend16(a[i+15:i] &gt;&gt; count[i+15:i])
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSRLVW" form="zmm {k}, zmm, zmm" xed="VPSRLVW_ZMMu16_MASKmskw_ZMMu16_ZMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_srlv_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI16"/>
+ <parameter type="__m512i" varname="count" etype="UI16"/>
+ <description>Shift packed 16-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ IF k[j]
+ IF count[i+15:i] &lt; 16
+ dst[i+15:i] := ZeroExtend16(a[i+15:i] &gt;&gt; count[i+15:i])
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSRLVW" form="zmm {z}, zmm, zmm" xed="VPSRLVW_ZMMu16_MASKmskw_ZMMu16_ZMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_srlv_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__m512i" varname="a" etype="UI16"/>
+ <parameter type="__m512i" varname="count" etype="UI16"/>
+ <description>Shift packed 16-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ IF count[i+15:i] &lt; 16
+ dst[i+15:i] := ZeroExtend16(a[i+15:i] &gt;&gt; count[i+15:i])
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSRLVW" form="zmm, zmm, zmm" xed="VPSRLVW_ZMMu16_MASKmskw_ZMMu16_ZMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_srlv_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__m128i" varname="src" etype="UI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <parameter type="__m128i" varname="count" etype="UI16"/>
+ <description>Shift packed 16-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ IF k[j]
+ IF count[i+15:i] &lt; 16
+ dst[i+15:i] := ZeroExtend16(a[i+15:i] &gt;&gt; count[i+15:i])
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPSRLVW" form="xmm {k}, xmm, xmm" xed="VPSRLVW_XMMu16_MASKmskw_XMMu16_XMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_srlv_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <parameter type="__m128i" varname="count" etype="UI16"/>
+ <description>Shift packed 16-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ IF k[j]
+ IF count[i+15:i] &lt; 16
+ dst[i+15:i] := ZeroExtend16(a[i+15:i] &gt;&gt; count[i+15:i])
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPSRLVW" form="xmm {z}, xmm, xmm" xed="VPSRLVW_XMMu16_MASKmskw_XMMu16_XMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_srlv_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <parameter type="__m128i" varname="count" etype="UI16"/>
+ <description>Shift packed 16-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ IF count[i+15:i] &lt; 16
+ dst[i+15:i] := ZeroExtend16(a[i+15:i] &gt;&gt; count[i+15:i])
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPSRLVW" form="xmm, xmm, xmm" xed="VPSRLVW_XMMu16_MASKmskw_XMMu16_XMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_srl_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__m256i" varname="src" etype="UI16"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI16"/>
+ <parameter type="__m128i" varname="count" etype="UI16"/>
+ <description>Shift packed 16-bit integers in "a" right by "count" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ IF k[j]
+ IF count[63:0] &gt; 15
+ dst[i+15:i] := 0
+ ELSE
+ dst[i+15:i] := ZeroExtend16(a[i+15:i] &gt;&gt; count[63:0])
+ FI
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSRLW" form="ymm {k}, ymm, xmm" xed="VPSRLW_YMMu16_MASKmskw_YMMu16_XMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_srli_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__m256i" varname="src" etype="UI16"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI16"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shift packed 16-bit integers in "a" right by "imm8" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ IF k[j]
+ IF imm8[7:0] &gt; 15
+ dst[i+15:i] := 0
+ ELSE
+ dst[i+15:i] := ZeroExtend16(a[i+15:i] &gt;&gt; imm8[7:0])
+ FI
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSRLW" form="ymm {k}, ymm, imm8" xed="VPSRLW_YMMu16_MASKmskw_YMMu16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_srl_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI16"/>
+ <parameter type="__m128i" varname="count" etype="UI16"/>
+ <description>Shift packed 16-bit integers in "a" right by "count" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ IF k[j]
+ IF count[63:0] &gt; 15
+ dst[i+15:i] := 0
+ ELSE
+ dst[i+15:i] := ZeroExtend16(a[i+15:i] &gt;&gt; count[63:0])
+ FI
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSRLW" form="ymm {z}, ymm, xmm" xed="VPSRLW_YMMu16_MASKmskw_YMMu16_XMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_srli_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI16"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shift packed 16-bit integers in "a" right by "imm8" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ IF k[j]
+ IF imm8[7:0] &gt; 15
+ dst[i+15:i] := 0
+ ELSE
+ dst[i+15:i] := ZeroExtend16(a[i+15:i] &gt;&gt; imm8[7:0])
+ FI
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSRLW" form="ymm {z}, ymm, imm8" xed="VPSRLW_YMMu16_MASKmskw_YMMu16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_srl_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__m512i" varname="src" etype="UI16"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI16"/>
+ <parameter type="__m128i" varname="count" etype="UI16"/>
+ <description>Shift packed 16-bit integers in "a" right by "count" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ IF k[j]
+ IF count[63:0] &gt; 15
+ dst[i+15:i] := 0
+ ELSE
+ dst[i+15:i] := ZeroExtend16(a[i+15:i] &gt;&gt; count[63:0])
+ FI
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSRLW" form="zmm {k}, zmm, xmm" xed="VPSRLW_ZMMu16_MASKmskw_ZMMu16_XMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_srli_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__m512i" varname="src" etype="UI16"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI16"/>
+ <parameter type="unsigned int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shift packed 16-bit integers in "a" right by "imm8" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ IF k[j]
+ IF imm8[7:0] &gt; 15
+ dst[i+15:i] := 0
+ ELSE
+ dst[i+15:i] := ZeroExtend16(a[i+15:i] &gt;&gt; imm8[7:0])
+ FI
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSRLW" form="zmm {k}, zmm, imm8" xed="VPSRLW_ZMMu16_MASKmskw_ZMMu16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_srl_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI16"/>
+ <parameter type="__m128i" varname="count" etype="UI16"/>
+ <description>Shift packed 16-bit integers in "a" right by "count" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ IF k[j]
+ IF count[63:0] &gt; 15
+ dst[i+15:i] := 0
+ ELSE
+ dst[i+15:i] := ZeroExtend16(a[i+15:i] &gt;&gt; count[63:0])
+ FI
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSRLW" form="zmm {z}, zmm, xmm" xed="VPSRLW_ZMMu16_MASKmskw_ZMMu16_XMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_srli_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI16"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shift packed 16-bit integers in "a" right by "imm8" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ IF k[j]
+ IF imm8[7:0] &gt; 15
+ dst[i+15:i] := 0
+ ELSE
+ dst[i+15:i] := ZeroExtend16(a[i+15:i] &gt;&gt; imm8[7:0])
+ FI
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSRLW" form="zmm {z}, zmm, imm8" xed="VPSRLW_ZMMu16_MASKmskw_ZMMu16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_srl_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__m512i" varname="a" etype="UI16"/>
+ <parameter type="__m128i" varname="count" etype="UI16"/>
+ <description>Shift packed 16-bit integers in "a" right by "count" while shifting in zeros, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ IF count[63:0] &gt; 15
+ dst[i+15:i] := 0
+ ELSE
+ dst[i+15:i] := ZeroExtend16(a[i+15:i] &gt;&gt; count[63:0])
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSRLW" form="zmm, zmm, xmm" xed="VPSRLW_ZMMu16_MASKmskw_ZMMu16_XMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_srli_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__m512i" varname="a" etype="UI16"/>
+ <parameter type="unsigned int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shift packed 16-bit integers in "a" right by "imm8" while shifting in zeros, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ IF imm8[7:0] &gt; 15
+ dst[i+15:i] := 0
+ ELSE
+ dst[i+15:i] := ZeroExtend16(a[i+15:i] &gt;&gt; imm8[7:0])
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSRLW" form="zmm, zmm, imm8" xed="VPSRLW_ZMMu16_MASKmskw_ZMMu16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_srl_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__m128i" varname="src" etype="UI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <parameter type="__m128i" varname="count" etype="UI16"/>
+ <description>Shift packed 16-bit integers in "a" right by "count" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ IF k[j]
+ IF count[63:0] &gt; 15
+ dst[i+15:i] := 0
+ ELSE
+ dst[i+15:i] := ZeroExtend16(a[i+15:i] &gt;&gt; count[63:0])
+ FI
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPSRLW" form="xmm {k}, xmm, xmm" xed="VPSRLW_XMMu16_MASKmskw_XMMu16_XMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_srli_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__m128i" varname="src" etype="UI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shift packed 16-bit integers in "a" right by "imm8" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ IF k[j]
+ IF imm8[7:0] &gt; 15
+ dst[i+15:i] := 0
+ ELSE
+ dst[i+15:i] := ZeroExtend16(a[i+15:i] &gt;&gt; imm8[7:0])
+ FI
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPSRLW" form="xmm {k}, xmm, imm8" xed="VPSRLW_XMMu16_MASKmskw_XMMu16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_srl_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <parameter type="__m128i" varname="count" etype="UI16"/>
+ <description>Shift packed 16-bit integers in "a" right by "count" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ IF k[j]
+ IF count[63:0] &gt; 15
+ dst[i+15:i] := 0
+ ELSE
+ dst[i+15:i] := ZeroExtend16(a[i+15:i] &gt;&gt; count[63:0])
+ FI
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPSRLW" form="xmm {z}, xmm, xmm" xed="VPSRLW_XMMu16_MASKmskw_XMMu16_XMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_srli_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shift packed 16-bit integers in "a" right by "imm8" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ IF k[j]
+ IF imm8[7:0] &gt; 15
+ dst[i+15:i] := 0
+ ELSE
+ dst[i+15:i] := ZeroExtend16(a[i+15:i] &gt;&gt; imm8[7:0])
+ FI
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPSRLW" form="xmm {z}, xmm, imm8" xed="VPSRLW_XMMu16_MASKmskw_XMMu16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_sub_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI8"/>
+ <parameter type="__m256i" varname="src" etype="UI8"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI8"/>
+ <parameter type="__m256i" varname="b" etype="UI8"/>
+ <description>Subtract packed 8-bit integers in "b" from packed 8-bit integers in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := a[i+7:i] - b[i+7:i]
+ ELSE
+ dst[i+7:i] := src[i+7:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSUBB" form="ymm {k}, ymm, ymm" xed="VPSUBB_YMMu8_MASKmskw_YMMu8_YMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_sub_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI8"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI8"/>
+ <parameter type="__m256i" varname="b" etype="UI8"/>
+ <description>Subtract packed 8-bit integers in "b" from packed 8-bit integers in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := a[i+7:i] - b[i+7:i]
+ ELSE
+ dst[i+7:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSUBB" form="ymm {z}, ymm, ymm" xed="VPSUBB_YMMu8_MASKmskw_YMMu8_YMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_sub_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI8"/>
+ <parameter type="__m512i" varname="src" etype="UI8"/>
+ <parameter type="__mmask64" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI8"/>
+ <parameter type="__m512i" varname="b" etype="UI8"/>
+ <description>Subtract packed 8-bit integers in "b" from packed 8-bit integers in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 63
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := a[i+7:i] - b[i+7:i]
+ ELSE
+ dst[i+7:i] := src[i+7:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSUBB" form="zmm {k}, zmm, zmm" xed="VPSUBB_ZMMu8_MASKmskw_ZMMu8_ZMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_sub_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI8"/>
+ <parameter type="__mmask64" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI8"/>
+ <parameter type="__m512i" varname="b" etype="UI8"/>
+ <description>Subtract packed 8-bit integers in "b" from packed 8-bit integers in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 63
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := a[i+7:i] - b[i+7:i]
+ ELSE
+ dst[i+7:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSUBB" form="zmm {z}, zmm, zmm" xed="VPSUBB_ZMMu8_MASKmskw_ZMMu8_ZMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_sub_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI8"/>
+ <parameter type="__m512i" varname="a" etype="UI8"/>
+ <parameter type="__m512i" varname="b" etype="UI8"/>
+ <description>Subtract packed 8-bit integers in "b" from packed 8-bit integers in "a", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 63
+ i := j*8
+ dst[i+7:i] := a[i+7:i] - b[i+7:i]
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSUBB" form="zmm, zmm, zmm" xed="VPSUBB_ZMMu8_MASKmskw_ZMMu8_ZMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_sub_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__m128i" varname="src" etype="UI8"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
+ <parameter type="__m128i" varname="b" etype="UI8"/>
+ <description>Subtract packed 8-bit integers in "b" from packed 8-bit integers in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := a[i+7:i] - b[i+7:i]
+ ELSE
+ dst[i+7:i] := src[i+7:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPSUBB" form="xmm {k}, xmm, xmm" xed="VPSUBB_XMMu8_MASKmskw_XMMu8_XMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_sub_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
+ <parameter type="__m128i" varname="b" etype="UI8"/>
+ <description>Subtract packed 8-bit integers in "b" from packed 8-bit integers in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := a[i+7:i] - b[i+7:i]
+ ELSE
+ dst[i+7:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPSUBB" form="xmm {z}, xmm, xmm" xed="VPSUBB_XMMu8_MASKmskw_XMMu8_XMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_subs_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI8"/>
+ <parameter type="__m256i" varname="src" etype="UI8"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI8"/>
+ <parameter type="__m256i" varname="b" etype="SI8"/>
+ <description>Subtract packed signed 8-bit integers in "b" from packed 8-bit integers in "a" using saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := Saturate8(a[i+7:i] - b[i+7:i])
+ ELSE
+ dst[i+7:i] := src[i+7:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSUBSB" form="ymm {k}, ymm, ymm" xed="VPSUBSB_YMMi8_MASKmskw_YMMi8_YMMi8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_subs_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI8"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI8"/>
+ <parameter type="__m256i" varname="b" etype="SI8"/>
+ <description>Subtract packed signed 8-bit integers in "b" from packed 8-bit integers in "a" using saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := Saturate8(a[i+7:i] - b[i+7:i])
+ ELSE
+ dst[i+7:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSUBSB" form="ymm {z}, ymm, ymm" xed="VPSUBSB_YMMi8_MASKmskw_YMMi8_YMMi8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_subs_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI8"/>
+ <parameter type="__m512i" varname="src" etype="UI8"/>
+ <parameter type="__mmask64" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI8"/>
+ <parameter type="__m512i" varname="b" etype="SI8"/>
+ <description>Subtract packed signed 8-bit integers in "b" from packed 8-bit integers in "a" using saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 63
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := Saturate8(a[i+7:i] - b[i+7:i])
+ ELSE
+ dst[i+7:i] := src[i+7:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSUBSB" form="zmm {k}, zmm, zmm" xed="VPSUBSB_ZMMi8_MASKmskw_ZMMi8_ZMMi8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_subs_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI8"/>
+ <parameter type="__mmask64" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI8"/>
+ <parameter type="__m512i" varname="b" etype="SI8"/>
+ <description>Subtract packed signed 8-bit integers in "b" from packed 8-bit integers in "a" using saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 63
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := Saturate8(a[i+7:i] - b[i+7:i])
+ ELSE
+ dst[i+7:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSUBSB" form="zmm {z}, zmm, zmm" xed="VPSUBSB_ZMMi8_MASKmskw_ZMMi8_ZMMi8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_subs_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI8"/>
+ <parameter type="__m512i" varname="a" etype="SI8"/>
+ <parameter type="__m512i" varname="b" etype="SI8"/>
+ <description>Subtract packed signed 8-bit integers in "b" from packed 8-bit integers in "a" using saturation, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 63
+ i := j*8
+ dst[i+7:i] := Saturate8(a[i+7:i] - b[i+7:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSUBSB" form="zmm, zmm, zmm" xed="VPSUBSB_ZMMi8_MASKmskw_ZMMi8_ZMMi8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_subs_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__m128i" varname="src" etype="UI8"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI8"/>
+ <parameter type="__m128i" varname="b" etype="SI8"/>
+ <description>Subtract packed signed 8-bit integers in "b" from packed 8-bit integers in "a" using saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := Saturate8(a[i+7:i] - b[i+7:i])
+ ELSE
+ dst[i+7:i] := src[i+7:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPSUBSB" form="xmm {k}, xmm, xmm" xed="VPSUBSB_XMMi8_MASKmskw_XMMi8_XMMi8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_subs_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI8"/>
+ <parameter type="__m128i" varname="b" etype="SI8"/>
+ <description>Subtract packed signed 8-bit integers in "b" from packed 8-bit integers in "a" using saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := Saturate8(a[i+7:i] - b[i+7:i])
+ ELSE
+ dst[i+7:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPSUBSB" form="xmm {z}, xmm, xmm" xed="VPSUBSB_XMMi8_MASKmskw_XMMi8_XMMi8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_subs_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__m256i" varname="src" etype="UI16"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI16"/>
+ <parameter type="__m256i" varname="b" etype="SI16"/>
+ <description>Subtract packed signed 16-bit integers in "b" from packed 16-bit integers in "a" using saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := Saturate16(a[i+15:i] - b[i+15:i])
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSUBSW" form="ymm {k}, ymm, ymm" xed="VPSUBSW_YMMi16_MASKmskw_YMMi16_YMMi16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_subs_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI16"/>
+ <parameter type="__m256i" varname="b" etype="SI16"/>
+ <description>Subtract packed signed 16-bit integers in "b" from packed 16-bit integers in "a" using saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := Saturate16(a[i+15:i] - b[i+15:i])
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSUBSW" form="ymm {z}, ymm, ymm" xed="VPSUBSW_YMMi16_MASKmskw_YMMi16_YMMi16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_subs_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__m512i" varname="src" etype="UI16"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI16"/>
+ <parameter type="__m512i" varname="b" etype="SI16"/>
+ <description>Subtract packed signed 16-bit integers in "b" from packed 16-bit integers in "a" using saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := Saturate16(a[i+15:i] - b[i+15:i])
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSUBSW" form="zmm {k}, zmm, zmm" xed="VPSUBSW_ZMMi16_MASKmskw_ZMMi16_ZMMi16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_subs_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI16"/>
+ <parameter type="__m512i" varname="b" etype="SI16"/>
+ <description>Subtract packed signed 16-bit integers in "b" from packed 16-bit integers in "a" using saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := Saturate16(a[i+15:i] - b[i+15:i])
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSUBSW" form="zmm {z}, zmm, zmm" xed="VPSUBSW_ZMMi16_MASKmskw_ZMMi16_ZMMi16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_subs_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__m512i" varname="a" etype="SI16"/>
+ <parameter type="__m512i" varname="b" etype="SI16"/>
+ <description>Subtract packed signed 16-bit integers in "b" from packed 16-bit integers in "a" using saturation, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ dst[i+15:i] := Saturate16(a[i+15:i] - b[i+15:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSUBSW" form="zmm, zmm, zmm" xed="VPSUBSW_ZMMi16_MASKmskw_ZMMi16_ZMMi16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_subs_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__m128i" varname="src" etype="UI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI16"/>
+ <parameter type="__m128i" varname="b" etype="SI16"/>
+ <description>Subtract packed signed 16-bit integers in "b" from packed 16-bit integers in "a" using saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := Saturate16(a[i+15:i] - b[i+15:i])
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPSUBSW" form="xmm {k}, xmm, xmm" xed="VPSUBSW_XMMi16_MASKmskw_XMMi16_XMMi16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_subs_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI16"/>
+ <parameter type="__m128i" varname="b" etype="SI16"/>
+ <description>Subtract packed signed 16-bit integers in "b" from packed 16-bit integers in "a" using saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := Saturate16(a[i+15:i] - b[i+15:i])
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPSUBSW" form="xmm {z}, xmm, xmm" xed="VPSUBSW_XMMi16_MASKmskw_XMMi16_XMMi16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_subs_epu8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI8"/>
+ <parameter type="__m256i" varname="src" etype="UI8"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI8"/>
+ <parameter type="__m256i" varname="b" etype="UI8"/>
+ <description>Subtract packed unsigned 8-bit integers in "b" from packed unsigned 8-bit integers in "a" using saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := SaturateU8(a[i+7:i] - b[i+7:i])
+ ELSE
+ dst[i+7:i] := src[i+7:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSUBUSB" form="ymm {k}, ymm, ymm" xed="VPSUBUSB_YMMu8_MASKmskw_YMMu8_YMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_subs_epu8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI8"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI8"/>
+ <parameter type="__m256i" varname="b" etype="UI8"/>
+ <description>Subtract packed unsigned 8-bit integers in "b" from packed unsigned 8-bit integers in "a" using saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := SaturateU8(a[i+7:i] - b[i+7:i])
+ ELSE
+ dst[i+7:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSUBUSB" form="ymm {z}, ymm, ymm" xed="VPSUBUSB_YMMu8_MASKmskw_YMMu8_YMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_subs_epu8">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI8"/>
+ <parameter type="__m512i" varname="src" etype="UI8"/>
+ <parameter type="__mmask64" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI8"/>
+ <parameter type="__m512i" varname="b" etype="UI8"/>
+ <description>Subtract packed unsigned 8-bit integers in "b" from packed unsigned 8-bit integers in "a" using saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 63
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := SaturateU8(a[i+7:i] - b[i+7:i])
+ ELSE
+ dst[i+7:i] := src[i+7:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSUBUSB" form="zmm {k}, zmm, zmm" xed="VPSUBUSB_ZMMu8_MASKmskw_ZMMu8_ZMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_subs_epu8">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI8"/>
+ <parameter type="__mmask64" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI8"/>
+ <parameter type="__m512i" varname="b" etype="UI8"/>
+ <description>Subtract packed unsigned 8-bit integers in "b" from packed unsigned 8-bit integers in "a" using saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 63
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := SaturateU8(a[i+7:i] - b[i+7:i])
+ ELSE
+ dst[i+7:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSUBUSB" form="zmm {z}, zmm, zmm" xed="VPSUBUSB_ZMMu8_MASKmskw_ZMMu8_ZMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_subs_epu8">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI8"/>
+ <parameter type="__m512i" varname="a" etype="UI8"/>
+ <parameter type="__m512i" varname="b" etype="UI8"/>
+ <description>Subtract packed unsigned 8-bit integers in "b" from packed unsigned 8-bit integers in "a" using saturation, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 63
+ i := j*8
+ dst[i+7:i] := SaturateU8(a[i+7:i] - b[i+7:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSUBUSB" form="zmm, zmm, zmm" xed="VPSUBUSB_ZMMu8_MASKmskw_ZMMu8_ZMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_subs_epu8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__m128i" varname="src" etype="UI8"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
+ <parameter type="__m128i" varname="b" etype="UI8"/>
+ <description>Subtract packed unsigned 8-bit integers in "b" from packed unsigned 8-bit integers in "a" using saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := SaturateU8(a[i+7:i] - b[i+7:i])
+ ELSE
+ dst[i+7:i] := src[i+7:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPSUBUSB" form="xmm {k}, xmm, xmm" xed="VPSUBUSB_XMMu8_MASKmskw_XMMu8_XMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_subs_epu8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
+ <parameter type="__m128i" varname="b" etype="UI8"/>
+ <description>Subtract packed unsigned 8-bit integers in "b" from packed unsigned 8-bit integers in "a" using saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := SaturateU8(a[i+7:i] - b[i+7:i])
+ ELSE
+ dst[i+7:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPSUBUSB" form="xmm {z}, xmm, xmm" xed="VPSUBUSB_XMMu8_MASKmskw_XMMu8_XMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_subs_epu16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__m256i" varname="src" etype="UI16"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI16"/>
+ <parameter type="__m256i" varname="b" etype="UI16"/>
+ <description>Subtract packed unsigned 16-bit integers in "b" from packed unsigned 16-bit integers in "a" using saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := SaturateU16(a[i+15:i] - b[i+15:i])
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSUBUSW" form="ymm {k}, ymm, ymm" xed="VPSUBUSW_YMMu16_MASKmskw_YMMu16_YMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_subs_epu16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI16"/>
+ <parameter type="__m256i" varname="b" etype="UI16"/>
+ <description>Subtract packed unsigned 16-bit integers in "b" from packed unsigned 16-bit integers in "a" using saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := SaturateU16(a[i+15:i] - b[i+15:i])
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSUBUSW" form="ymm {z}, ymm, ymm" xed="VPSUBUSW_YMMu16_MASKmskw_YMMu16_YMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_subs_epu16">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__m512i" varname="src" etype="UI16"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI16"/>
+ <parameter type="__m512i" varname="b" etype="UI16"/>
+ <description>Subtract packed unsigned 16-bit integers in "b" from packed unsigned 16-bit integers in "a" using saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := SaturateU16(a[i+15:i] - b[i+15:i])
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSUBUSW" form="zmm {k}, zmm, zmm" xed="VPSUBUSW_ZMMu16_MASKmskw_ZMMu16_ZMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_subs_epu16">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI16"/>
+ <parameter type="__m512i" varname="b" etype="UI16"/>
+ <description>Subtract packed unsigned 16-bit integers in "b" from packed unsigned 16-bit integers in "a" using saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := SaturateU16(a[i+15:i] - b[i+15:i])
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSUBUSW" form="zmm {z}, zmm, zmm" xed="VPSUBUSW_ZMMu16_MASKmskw_ZMMu16_ZMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_subs_epu16">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__m512i" varname="a" etype="UI16"/>
+ <parameter type="__m512i" varname="b" etype="UI16"/>
+ <description>Subtract packed unsigned 16-bit integers in "b" from packed unsigned 16-bit integers in "a" using saturation, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ dst[i+15:i] := SaturateU16(a[i+15:i] - b[i+15:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSUBUSW" form="zmm, zmm, zmm" xed="VPSUBUSW_ZMMu16_MASKmskw_ZMMu16_ZMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_subs_epu16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__m128i" varname="src" etype="UI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <parameter type="__m128i" varname="b" etype="UI16"/>
+ <description>Subtract packed unsigned 16-bit integers in "b" from packed unsigned 16-bit integers in "a" using saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := SaturateU16(a[i+15:i] - b[i+15:i])
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPSUBUSW" form="xmm {k}, xmm, xmm" xed="VPSUBUSW_XMMu16_MASKmskw_XMMu16_XMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_subs_epu16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <parameter type="__m128i" varname="b" etype="UI16"/>
+ <description>Subtract packed unsigned 16-bit integers in "b" from packed unsigned 16-bit integers in "a" using saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := SaturateU16(a[i+15:i] - b[i+15:i])
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPSUBUSW" form="xmm {z}, xmm, xmm" xed="VPSUBUSW_XMMu16_MASKmskw_XMMu16_XMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_sub_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__m256i" varname="src" etype="UI16"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI16"/>
+ <parameter type="__m256i" varname="b" etype="UI16"/>
+ <description>Subtract packed 16-bit integers in "b" from packed 16-bit integers in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := a[i+15:i] - b[i+15:i]
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSUBW" form="ymm {k}, ymm, ymm" xed="VPSUBW_YMMu16_MASKmskw_YMMu16_YMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_sub_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI16"/>
+ <parameter type="__m256i" varname="b" etype="UI16"/>
+ <description>Subtract packed 16-bit integers in "b" from packed 16-bit integers in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := a[i+15:i] - b[i+15:i]
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSUBW" form="ymm {z}, ymm, ymm" xed="VPSUBW_YMMu16_MASKmskw_YMMu16_YMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_sub_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__m512i" varname="src" etype="UI16"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI16"/>
+ <parameter type="__m512i" varname="b" etype="UI16"/>
+ <description>Subtract packed 16-bit integers in "b" from packed 16-bit integers in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := a[i+15:i] - b[i+15:i]
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSUBW" form="zmm {k}, zmm, zmm" xed="VPSUBW_ZMMu16_MASKmskw_ZMMu16_ZMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_sub_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI16"/>
+ <parameter type="__m512i" varname="b" etype="UI16"/>
+ <description>Subtract packed 16-bit integers in "b" from packed 16-bit integers in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := a[i+15:i] - b[i+15:i]
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSUBW" form="zmm {z}, zmm, zmm" xed="VPSUBW_ZMMu16_MASKmskw_ZMMu16_ZMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_sub_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__m512i" varname="a" etype="UI16"/>
+ <parameter type="__m512i" varname="b" etype="UI16"/>
+ <description>Subtract packed 16-bit integers in "b" from packed 16-bit integers in "a", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ dst[i+15:i] := a[i+15:i] - b[i+15:i]
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSUBW" form="zmm, zmm, zmm" xed="VPSUBW_ZMMu16_MASKmskw_ZMMu16_ZMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_sub_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__m128i" varname="src" etype="UI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <parameter type="__m128i" varname="b" etype="UI16"/>
+ <description>Subtract packed 16-bit integers in "b" from packed 16-bit integers in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := a[i+15:i] - b[i+15:i]
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPSUBW" form="xmm {k}, xmm, xmm" xed="VPSUBW_XMMu16_MASKmskw_XMMu16_XMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_sub_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <parameter type="__m128i" varname="b" etype="UI16"/>
+ <description>Subtract packed 16-bit integers in "b" from packed 16-bit integers in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := a[i+15:i] - b[i+15:i]
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPSUBW" form="xmm {z}, xmm, xmm" xed="VPSUBW_XMMu16_MASKmskw_XMMu16_XMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_test_epi8_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__mmask32" varname="k1" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI8"/>
+ <parameter type="__m256i" varname="b" etype="UI8"/>
+ <description>Compute the bitwise AND of packed 8-bit integers in "a" and "b", producing intermediate 8-bit values, and set the corresponding bit in result mask "k" (subject to writemask "k") if the intermediate value is non-zero.</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*8
+ IF k1[j]
+ k[j] := ((a[i+7:i] AND b[i+7:i]) != 0) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:32] := 0
+ </operation>
+ <instruction name="VPTESTMB" form="k {k}, ymm, ymm" xed="VPTESTMB_MASKmskw_MASKmskw_YMMu8_YMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_test_epi8_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI8"/>
+ <parameter type="__m256i" varname="b" etype="UI8"/>
+ <description>Compute the bitwise AND of packed 8-bit integers in "a" and "b", producing intermediate 8-bit values, and set the corresponding bit in result mask "k" if the intermediate value is non-zero.</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*8
+ k[j] := ((a[i+7:i] AND b[i+7:i]) != 0) ? 1 : 0
+ENDFOR
+k[MAX:32] := 0
+ </operation>
+ <instruction name="VPTESTMB" form="k, ymm, ymm" xed="VPTESTMB_MASKmskw_MASKmskw_YMMu8_YMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_test_epi8_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask64" varname="k" etype="MASK"/>
+ <parameter type="__mmask64" varname="k1" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI8"/>
+ <parameter type="__m512i" varname="b" etype="UI8"/>
+ <description>Compute the bitwise AND of packed 8-bit integers in "a" and "b", producing intermediate 8-bit values, and set the corresponding bit in result mask "k" (subject to writemask "k") if the intermediate value is non-zero.</description>
+ <operation>
+FOR j := 0 to 63
+ i := j*8
+ IF k1[j]
+ k[j] := ((a[i+7:i] AND b[i+7:i]) != 0) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:64] := 0
+ </operation>
+ <instruction name="VPTESTMB" form="k {k}, zmm, zmm" xed="VPTESTMB_MASKmskw_MASKmskw_ZMMu8_ZMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_test_epi8_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask64" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI8"/>
+ <parameter type="__m512i" varname="b" etype="UI8"/>
+ <description>Compute the bitwise AND of packed 8-bit integers in "a" and "b", producing intermediate 8-bit values, and set the corresponding bit in result mask "k" if the intermediate value is non-zero.</description>
+ <operation>
+FOR j := 0 to 63
+ i := j*8
+ k[j] := ((a[i+7:i] AND b[i+7:i]) != 0) ? 1 : 0
+ENDFOR
+k[MAX:64] := 0
+ </operation>
+ <instruction name="VPTESTMB" form="k, zmm, zmm" xed="VPTESTMB_MASKmskw_MASKmskw_ZMMu8_ZMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_test_epi8_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__mmask16" varname="k1" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
+ <parameter type="__m128i" varname="b" etype="UI8"/>
+ <description>Compute the bitwise AND of packed 8-bit integers in "a" and "b", producing intermediate 8-bit values, and set the corresponding bit in result mask "k" (subject to writemask "k") if the intermediate value is non-zero.</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*8
+ IF k1[j]
+ k[j] := ((a[i+7:i] AND b[i+7:i]) != 0) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:16] := 0
+ </operation>
+ <instruction name="VPTESTMB" form="k {k}, xmm, xmm" xed="VPTESTMB_MASKmskw_MASKmskw_XMMu8_XMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_test_epi8_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
+ <parameter type="__m128i" varname="b" etype="UI8"/>
+ <description>Compute the bitwise AND of packed 8-bit integers in "a" and "b", producing intermediate 8-bit values, and set the corresponding bit in result mask "k" if the intermediate value is non-zero.</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*8
+ k[j] := ((a[i+7:i] AND b[i+7:i]) != 0) ? 1 : 0
+ENDFOR
+k[MAX:16] := 0
+ </operation>
+ <instruction name="VPTESTMB" form="k, xmm, xmm" xed="VPTESTMB_MASKmskw_MASKmskw_XMMu8_XMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_test_epi16_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__mmask16" varname="k1" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI16"/>
+ <parameter type="__m256i" varname="b" etype="UI16"/>
+ <description>Compute the bitwise AND of packed 16-bit integers in "a" and "b", producing intermediate 16-bit values, and set the corresponding bit in result mask "k" (subject to writemask "k") if the intermediate value is non-zero.</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ IF k1[j]
+ k[j] := ((a[i+15:i] AND b[i+15:i]) != 0) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:16] := 0
+ </operation>
+ <instruction name="VPTESTMW" form="k {k}, ymm, ymm" xed="VPTESTMW_MASKmskw_MASKmskw_YMMu16_YMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_test_epi16_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI16"/>
+ <parameter type="__m256i" varname="b" etype="UI16"/>
+ <description>Compute the bitwise AND of packed 16-bit integers in "a" and "b", producing intermediate 16-bit values, and set the corresponding bit in result mask "k" if the intermediate value is non-zero.</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ k[j] := ((a[i+15:i] AND b[i+15:i]) != 0) ? 1 : 0
+ENDFOR
+k[MAX:16] := 0
+ </operation>
+ <instruction name="VPTESTMW" form="k, ymm, ymm" xed="VPTESTMW_MASKmskw_MASKmskw_YMMu16_YMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_test_epi16_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__mmask32" varname="k1" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI16"/>
+ <parameter type="__m512i" varname="b" etype="UI16"/>
+ <description>Compute the bitwise AND of packed 16-bit integers in "a" and "b", producing intermediate 16-bit values, and set the corresponding bit in result mask "k" (subject to writemask "k") if the intermediate value is non-zero.</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ IF k1[j]
+ k[j] := ((a[i+15:i] AND b[i+15:i]) != 0) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:32] := 0
+ </operation>
+ <instruction name="VPTESTMW" form="k {k}, zmm, zmm" xed="VPTESTMW_MASKmskw_MASKmskw_ZMMu16_ZMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_test_epi16_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI16"/>
+ <parameter type="__m512i" varname="b" etype="UI16"/>
+ <description>Compute the bitwise AND of packed 16-bit integers in "a" and "b", producing intermediate 16-bit values, and set the corresponding bit in result mask "k" if the intermediate value is non-zero.</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ k[j] := ((a[i+15:i] AND b[i+15:i]) != 0) ? 1 : 0
+ENDFOR
+k[MAX:32] := 0
+ </operation>
+ <instruction name="VPTESTMW" form="k, zmm, zmm" xed="VPTESTMW_MASKmskw_MASKmskw_ZMMu16_ZMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_test_epi16_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__mmask8" varname="k1" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <parameter type="__m128i" varname="b" etype="UI16"/>
+ <description>Compute the bitwise AND of packed 16-bit integers in "a" and "b", producing intermediate 16-bit values, and set the corresponding bit in result mask "k" (subject to writemask "k") if the intermediate value is non-zero.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ IF k1[j]
+ k[j] := ((a[i+15:i] AND b[i+15:i]) != 0) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:8] := 0
+ </operation>
+ <instruction name="VPTESTMW" form="k {k}, xmm, xmm" xed="VPTESTMW_MASKmskw_MASKmskw_XMMu16_XMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_test_epi16_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <parameter type="__m128i" varname="b" etype="UI16"/>
+ <description>Compute the bitwise AND of packed 16-bit integers in "a" and "b", producing intermediate 16-bit values, and set the corresponding bit in result mask "k" if the intermediate value is non-zero.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ k[j] := ((a[i+15:i] AND b[i+15:i]) != 0) ? 1 : 0
+ENDFOR
+k[MAX:8] := 0
+ </operation>
+ <instruction name="VPTESTMW" form="k, xmm, xmm" xed="VPTESTMW_MASKmskw_MASKmskw_XMMu16_XMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_testn_epi8_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__mmask32" varname="k1" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI8"/>
+ <parameter type="__m256i" varname="b" etype="UI8"/>
+ <description>Compute the bitwise NAND of packed 8-bit integers in "a" and "b", producing intermediate 8-bit values, and set the corresponding bit in result mask "k" (subject to writemask "k") if the intermediate value is zero.</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*8
+ IF k1[j]
+ k[j] := ((a[i+7:i] AND b[i+7:i]) == 0) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:32] := 0
+ </operation>
+ <instruction name="VPTESTNMB" form="k {k}, ymm, ymm" xed="VPTESTNMB_MASKmskw_MASKmskw_YMMu8_YMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_testn_epi8_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI8"/>
+ <parameter type="__m256i" varname="b" etype="UI8"/>
+ <description>Compute the bitwise NAND of packed 8-bit integers in "a" and "b", producing intermediate 8-bit values, and set the corresponding bit in result mask "k" if the intermediate value is zero.</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*8
+ k[j] := ((a[i+7:i] AND b[i+7:i]) == 0) ? 1 : 0
+ENDFOR
+k[MAX:32] := 0
+ </operation>
+ <instruction name="VPTESTNMB" form="k, ymm, ymm" xed="VPTESTNMB_MASKmskw_MASKmskw_YMMu8_YMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_testn_epi8_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask64" varname="k" etype="MASK"/>
+ <parameter type="__mmask64" varname="k1" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI8"/>
+ <parameter type="__m512i" varname="b" etype="UI8"/>
+ <description>Compute the bitwise NAND of packed 8-bit integers in "a" and "b", producing intermediate 8-bit values, and set the corresponding bit in result mask "k" (subject to writemask "k") if the intermediate value is zero.</description>
+ <operation>
+FOR j := 0 to 63
+ i := j*8
+ IF k1[j]
+ k[j] := ((a[i+7:i] AND b[i+7:i]) == 0) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:64] := 0
+ </operation>
+ <instruction name="VPTESTNMB" form="k {k}, zmm, zmm" xed="VPTESTNMB_MASKmskw_MASKmskw_ZMMu8_ZMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_testn_epi8_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask64" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI8"/>
+ <parameter type="__m512i" varname="b" etype="UI8"/>
+ <description>Compute the bitwise NAND of packed 8-bit integers in "a" and "b", producing intermediate 8-bit values, and set the corresponding bit in result mask "k" if the intermediate value is zero.</description>
+ <operation>
+FOR j := 0 to 63
+ i := j*8
+ k[j] := ((a[i+7:i] AND b[i+7:i]) == 0) ? 1 : 0
+ENDFOR
+k[MAX:64] := 0
+ </operation>
+ <instruction name="VPTESTNMB" form="k, zmm, zmm" xed="VPTESTNMB_MASKmskw_MASKmskw_ZMMu8_ZMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_testn_epi8_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__mmask16" varname="k1" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
+ <parameter type="__m128i" varname="b" etype="UI8"/>
+ <description>Compute the bitwise NAND of packed 8-bit integers in "a" and "b", producing intermediate 8-bit values, and set the corresponding bit in result mask "k" (subject to writemask "k") if the intermediate value is zero.</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*8
+ IF k1[j]
+ k[j] := ((a[i+7:i] AND b[i+7:i]) == 0) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:16] := 0
+ </operation>
+ <instruction name="VPTESTNMB" form="k {k}, xmm, xmm" xed="VPTESTNMB_MASKmskw_MASKmskw_XMMu8_XMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_testn_epi8_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
+ <parameter type="__m128i" varname="b" etype="UI8"/>
+ <description>Compute the bitwise NAND of packed 8-bit integers in "a" and "b", producing intermediate 8-bit values, and set the corresponding bit in result mask "k" if the intermediate value is zero.</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*8
+ k[j] := ((a[i+7:i] AND b[i+7:i]) == 0) ? 1 : 0
+ENDFOR
+k[MAX:16] := 0
+ </operation>
+ <instruction name="VPTESTNMB" form="k, xmm, xmm" xed="VPTESTNMB_MASKmskw_MASKmskw_XMMu8_XMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_testn_epi16_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__mmask16" varname="k1" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI16"/>
+ <parameter type="__m256i" varname="b" etype="UI16"/>
+ <description>Compute the bitwise NAND of packed 16-bit integers in "a" and "b", producing intermediate 16-bit values, and set the corresponding bit in result mask "k" (subject to writemask "k") if the intermediate value is zero.</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ IF k1[j]
+ k[j] := ((a[i+15:i] AND b[i+15:i]) == 0) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:16] := 0
+ </operation>
+ <instruction name="VPTESTNMW" form="k {k}, ymm, ymm" xed="VPTESTNMW_MASKmskw_MASKmskw_YMMu16_YMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_testn_epi16_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI16"/>
+ <parameter type="__m256i" varname="b" etype="UI16"/>
+ <description>Compute the bitwise NAND of packed 16-bit integers in "a" and "b", producing intermediate 16-bit values, and set the corresponding bit in result mask "k" if the intermediate value is zero.</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ k[j] := ((a[i+15:i] AND b[i+15:i]) == 0) ? 1 : 0
+ENDFOR
+k[MAX:16] := 0
+ </operation>
+ <instruction name="VPTESTNMW" form="k, ymm, ymm" xed="VPTESTNMW_MASKmskw_MASKmskw_YMMu16_YMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_testn_epi16_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__mmask32" varname="k1" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI16"/>
+ <parameter type="__m512i" varname="b" etype="UI16"/>
+ <description>Compute the bitwise NAND of packed 16-bit integers in "a" and "b", producing intermediate 16-bit values, and set the corresponding bit in result mask "k" (subject to writemask "k") if the intermediate value is zero.</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ IF k1[j]
+ k[j] := ((a[i+15:i] AND b[i+15:i]) == 0) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:32] := 0
+ </operation>
+ <instruction name="VPTESTNMW" form="k {k}, zmm, zmm" xed="VPTESTNMW_MASKmskw_MASKmskw_ZMMu16_ZMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_testn_epi16_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI16"/>
+ <parameter type="__m512i" varname="b" etype="UI16"/>
+ <description>Compute the bitwise NAND of packed 16-bit integers in "a" and "b", producing intermediate 16-bit values, and set the corresponding bit in result mask "k" if the intermediate value is zero.</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ k[j] := ((a[i+15:i] AND b[i+15:i]) == 0) ? 1 : 0
+ENDFOR
+k[MAX:32] := 0
+ </operation>
+ <instruction name="VPTESTNMW" form="k, zmm, zmm" xed="VPTESTNMW_MASKmskw_MASKmskw_ZMMu16_ZMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_testn_epi16_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__mmask8" varname="k1" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <parameter type="__m128i" varname="b" etype="UI16"/>
+ <description>Compute the bitwise NAND of packed 16-bit integers in "a" and "b", producing intermediate 16-bit values, and set the corresponding bit in result mask "k" (subject to writemask "k") if the intermediate value is zero.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ IF k1[j]
+ k[j] := ((a[i+15:i] AND b[i+15:i]) == 0) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:8] := 0
+ </operation>
+ <instruction name="VPTESTNMW" form="k {k}, xmm, xmm" xed="VPTESTNMW_MASKmskw_MASKmskw_XMMu16_XMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_testn_epi16_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <parameter type="__m128i" varname="b" etype="UI16"/>
+ <description>Compute the bitwise NAND of packed 16-bit integers in "a" and "b", producing intermediate 16-bit values, and set the corresponding bit in result mask "k" if the intermediate value is zero.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ k[j] := ((a[i+15:i] AND b[i+15:i]) == 0) ? 1 : 0
+ENDFOR
+k[MAX:8] := 0
+ </operation>
+ <instruction name="VPTESTNMW" form="k, xmm, xmm" xed="VPTESTNMW_MASKmskw_MASKmskw_XMMu16_XMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_unpackhi_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256i" varname="dst" etype="UI8"/>
+ <parameter type="__m256i" varname="src" etype="UI8"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI8"/>
+ <parameter type="__m256i" varname="b" etype="UI8"/>
+ <description>Unpack and interleave 8-bit integers from the high half of each 128-bit lane in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE INTERLEAVE_HIGH_BYTES(src1[127:0], src2[127:0]) {
+ dst[7:0] := src1[71:64]
+ dst[15:8] := src2[71:64]
+ dst[23:16] := src1[79:72]
+ dst[31:24] := src2[79:72]
+ dst[39:32] := src1[87:80]
+ dst[47:40] := src2[87:80]
+ dst[55:48] := src1[95:88]
+ dst[63:56] := src2[95:88]
+ dst[71:64] := src1[103:96]
+ dst[79:72] := src2[103:96]
+ dst[87:80] := src1[111:104]
+ dst[95:88] := src2[111:104]
+ dst[103:96] := src1[119:112]
+ dst[111:104] := src2[119:112]
+ dst[119:112] := src1[127:120]
+ dst[127:120] := src2[127:120]
+ RETURN dst[127:0]
+}
+tmp_dst[127:0] := INTERLEAVE_HIGH_BYTES(a[127:0], b[127:0])
+tmp_dst[255:128] := INTERLEAVE_HIGH_BYTES(a[255:128], b[255:128])
+FOR j := 0 to 31
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := tmp_dst[i+7:i]
+ ELSE
+ dst[i+7:i] := src[i+7:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPUNPCKHBW" form="ymm {k}, ymm, ymm" xed="VPUNPCKHBW_YMMu8_MASKmskw_YMMu8_YMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_unpackhi_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256i" varname="dst" etype="UI8"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI8"/>
+ <parameter type="__m256i" varname="b" etype="UI8"/>
+ <description>Unpack and interleave 8-bit integers from the high half of each 128-bit lane in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE INTERLEAVE_HIGH_BYTES(src1[127:0], src2[127:0]) {
+ dst[7:0] := src1[71:64]
+ dst[15:8] := src2[71:64]
+ dst[23:16] := src1[79:72]
+ dst[31:24] := src2[79:72]
+ dst[39:32] := src1[87:80]
+ dst[47:40] := src2[87:80]
+ dst[55:48] := src1[95:88]
+ dst[63:56] := src2[95:88]
+ dst[71:64] := src1[103:96]
+ dst[79:72] := src2[103:96]
+ dst[87:80] := src1[111:104]
+ dst[95:88] := src2[111:104]
+ dst[103:96] := src1[119:112]
+ dst[111:104] := src2[119:112]
+ dst[119:112] := src1[127:120]
+ dst[127:120] := src2[127:120]
+ RETURN dst[127:0]
+}
+tmp_dst[127:0] := INTERLEAVE_HIGH_BYTES(a[127:0], b[127:0])
+tmp_dst[255:128] := INTERLEAVE_HIGH_BYTES(a[255:128], b[255:128])
+FOR j := 0 to 31
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := tmp_dst[i+7:i]
+ ELSE
+ dst[i+7:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPUNPCKHBW" form="ymm {z}, ymm, ymm" xed="VPUNPCKHBW_YMMu8_MASKmskw_YMMu8_YMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_unpackhi_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512i" varname="dst" etype="UI8"/>
+ <parameter type="__m512i" varname="src" etype="UI8"/>
+ <parameter type="__mmask64" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI8"/>
+ <parameter type="__m512i" varname="b" etype="UI8"/>
+ <description>Unpack and interleave 8-bit integers from the high half of each 128-bit lane in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE INTERLEAVE_HIGH_BYTES(src1[127:0], src2[127:0]) {
+ dst[7:0] := src1[71:64]
+ dst[15:8] := src2[71:64]
+ dst[23:16] := src1[79:72]
+ dst[31:24] := src2[79:72]
+ dst[39:32] := src1[87:80]
+ dst[47:40] := src2[87:80]
+ dst[55:48] := src1[95:88]
+ dst[63:56] := src2[95:88]
+ dst[71:64] := src1[103:96]
+ dst[79:72] := src2[103:96]
+ dst[87:80] := src1[111:104]
+ dst[95:88] := src2[111:104]
+ dst[103:96] := src1[119:112]
+ dst[111:104] := src2[119:112]
+ dst[119:112] := src1[127:120]
+ dst[127:120] := src2[127:120]
+ RETURN dst[127:0]
+}
+tmp_dst[127:0] := INTERLEAVE_HIGH_BYTES(a[127:0], b[127:0])
+tmp_dst[255:128] := INTERLEAVE_HIGH_BYTES(a[255:128], b[255:128])
+tmp_dst[383:256] := INTERLEAVE_HIGH_BYTES(a[383:256], b[383:256])
+tmp_dst[511:384] := INTERLEAVE_HIGH_BYTES(a[511:384], b[511:384])
+FOR j := 0 to 63
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := tmp_dst[i+7:i]
+ ELSE
+ dst[i+7:i] := src[i+7:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPUNPCKHBW" form="zmm {k}, zmm, zmm" xed="VPUNPCKHBW_ZMMu8_MASKmskw_ZMMu8_ZMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_unpackhi_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512i" varname="dst" etype="UI8"/>
+ <parameter type="__mmask64" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI8"/>
+ <parameter type="__m512i" varname="b" etype="UI8"/>
+ <description>Unpack and interleave 8-bit integers from the high half of each 128-bit lane in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE INTERLEAVE_HIGH_BYTES(src1[127:0], src2[127:0]) {
+ dst[7:0] := src1[71:64]
+ dst[15:8] := src2[71:64]
+ dst[23:16] := src1[79:72]
+ dst[31:24] := src2[79:72]
+ dst[39:32] := src1[87:80]
+ dst[47:40] := src2[87:80]
+ dst[55:48] := src1[95:88]
+ dst[63:56] := src2[95:88]
+ dst[71:64] := src1[103:96]
+ dst[79:72] := src2[103:96]
+ dst[87:80] := src1[111:104]
+ dst[95:88] := src2[111:104]
+ dst[103:96] := src1[119:112]
+ dst[111:104] := src2[119:112]
+ dst[119:112] := src1[127:120]
+ dst[127:120] := src2[127:120]
+ RETURN dst[127:0]
+}
+tmp_dst[127:0] := INTERLEAVE_HIGH_BYTES(a[127:0], b[127:0])
+tmp_dst[255:128] := INTERLEAVE_HIGH_BYTES(a[255:128], b[255:128])
+tmp_dst[383:256] := INTERLEAVE_HIGH_BYTES(a[383:256], b[383:256])
+tmp_dst[511:384] := INTERLEAVE_HIGH_BYTES(a[511:384], b[511:384])
+FOR j := 0 to 63
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := tmp_dst[i+7:i]
+ ELSE
+ dst[i+7:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPUNPCKHBW" form="zmm {z}, zmm, zmm" xed="VPUNPCKHBW_ZMMu8_MASKmskw_ZMMu8_ZMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_unpackhi_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512i" varname="dst" etype="UI8"/>
+ <parameter type="__m512i" varname="a" etype="UI8"/>
+ <parameter type="__m512i" varname="b" etype="UI8"/>
+ <description>Unpack and interleave 8-bit integers from the high half of each 128-bit lane in "a" and "b", and store the results in "dst".</description>
+ <operation>
+DEFINE INTERLEAVE_HIGH_BYTES(src1[127:0], src2[127:0]) {
+ dst[7:0] := src1[71:64]
+ dst[15:8] := src2[71:64]
+ dst[23:16] := src1[79:72]
+ dst[31:24] := src2[79:72]
+ dst[39:32] := src1[87:80]
+ dst[47:40] := src2[87:80]
+ dst[55:48] := src1[95:88]
+ dst[63:56] := src2[95:88]
+ dst[71:64] := src1[103:96]
+ dst[79:72] := src2[103:96]
+ dst[87:80] := src1[111:104]
+ dst[95:88] := src2[111:104]
+ dst[103:96] := src1[119:112]
+ dst[111:104] := src2[119:112]
+ dst[119:112] := src1[127:120]
+ dst[127:120] := src2[127:120]
+ RETURN dst[127:0]
+}
+dst[127:0] := INTERLEAVE_HIGH_BYTES(a[127:0], b[127:0])
+dst[255:128] := INTERLEAVE_HIGH_BYTES(a[255:128], b[255:128])
+dst[383:256] := INTERLEAVE_HIGH_BYTES(a[383:256], b[383:256])
+dst[511:384] := INTERLEAVE_HIGH_BYTES(a[511:384], b[511:384])
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPUNPCKHBW" form="zmm, zmm, zmm" xed="VPUNPCKHBW_ZMMu8_MASKmskw_ZMMu8_ZMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_unpackhi_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__m128i" varname="src" etype="UI8"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
+ <parameter type="__m128i" varname="b" etype="UI8"/>
+ <description>Unpack and interleave 8-bit integers from the high half of "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE INTERLEAVE_HIGH_BYTES(src1[127:0], src2[127:0]) {
+ dst[7:0] := src1[71:64]
+ dst[15:8] := src2[71:64]
+ dst[23:16] := src1[79:72]
+ dst[31:24] := src2[79:72]
+ dst[39:32] := src1[87:80]
+ dst[47:40] := src2[87:80]
+ dst[55:48] := src1[95:88]
+ dst[63:56] := src2[95:88]
+ dst[71:64] := src1[103:96]
+ dst[79:72] := src2[103:96]
+ dst[87:80] := src1[111:104]
+ dst[95:88] := src2[111:104]
+ dst[103:96] := src1[119:112]
+ dst[111:104] := src2[119:112]
+ dst[119:112] := src1[127:120]
+ dst[127:120] := src2[127:120]
+ RETURN dst[127:0]
+}
+tmp_dst[127:0] := INTERLEAVE_HIGH_BYTES(a[127:0], b[127:0])
+FOR j := 0 to 15
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := tmp_dst[i+7:i]
+ ELSE
+ dst[i+7:i] := src[i+7:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPUNPCKHBW" form="xmm {k}, xmm, xmm" xed="VPUNPCKHBW_XMMu8_MASKmskw_XMMu8_XMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_unpackhi_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
+ <parameter type="__m128i" varname="b" etype="UI8"/>
+ <description>Unpack and interleave 8-bit integers from the high half of "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE INTERLEAVE_HIGH_BYTES(src1[127:0], src2[127:0]) {
+ dst[7:0] := src1[71:64]
+ dst[15:8] := src2[71:64]
+ dst[23:16] := src1[79:72]
+ dst[31:24] := src2[79:72]
+ dst[39:32] := src1[87:80]
+ dst[47:40] := src2[87:80]
+ dst[55:48] := src1[95:88]
+ dst[63:56] := src2[95:88]
+ dst[71:64] := src1[103:96]
+ dst[79:72] := src2[103:96]
+ dst[87:80] := src1[111:104]
+ dst[95:88] := src2[111:104]
+ dst[103:96] := src1[119:112]
+ dst[111:104] := src2[119:112]
+ dst[119:112] := src1[127:120]
+ dst[127:120] := src2[127:120]
+ RETURN dst[127:0]
+}
+tmp_dst[127:0] := INTERLEAVE_HIGH_BYTES(a[127:0], b[127:0])
+FOR j := 0 to 15
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := tmp_dst[i+7:i]
+ ELSE
+ dst[i+7:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPUNPCKHBW" form="xmm {z}, xmm, xmm" xed="VPUNPCKHBW_XMMu8_MASKmskw_XMMu8_XMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_unpackhi_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__m256i" varname="src" etype="UI16"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI16"/>
+ <parameter type="__m256i" varname="b" etype="UI16"/>
+ <description>Unpack and interleave 16-bit integers from the high half of each 128-bit lane in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE INTERLEAVE_HIGH_WORDS(src1[127:0], src2[127:0]) {
+ dst[15:0] := src1[79:64]
+ dst[31:16] := src2[79:64]
+ dst[47:32] := src1[95:80]
+ dst[63:48] := src2[95:80]
+ dst[79:64] := src1[111:96]
+ dst[95:80] := src2[111:96]
+ dst[111:96] := src1[127:112]
+ dst[127:112] := src2[127:112]
+ RETURN dst[127:0]
+}
+tmp_dst[127:0] := INTERLEAVE_HIGH_WORDS(a[127:0], b[127:0])
+tmp_dst[255:128] := INTERLEAVE_HIGH_WORDS(a[255:128], b[255:128])
+FOR j := 0 to 15
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := tmp_dst[i+15:i]
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPUNPCKHWD" form="ymm {k}, ymm, ymm" xed="VPUNPCKHWD_YMMu16_MASKmskw_YMMu16_YMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_unpackhi_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI16"/>
+ <parameter type="__m256i" varname="b" etype="UI16"/>
+ <description>Unpack and interleave 16-bit integers from the high half of each 128-bit lane in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE INTERLEAVE_HIGH_WORDS(src1[127:0], src2[127:0]) {
+ dst[15:0] := src1[79:64]
+ dst[31:16] := src2[79:64]
+ dst[47:32] := src1[95:80]
+ dst[63:48] := src2[95:80]
+ dst[79:64] := src1[111:96]
+ dst[95:80] := src2[111:96]
+ dst[111:96] := src1[127:112]
+ dst[127:112] := src2[127:112]
+ RETURN dst[127:0]
+}
+tmp_dst[127:0] := INTERLEAVE_HIGH_WORDS(a[127:0], b[127:0])
+tmp_dst[255:128] := INTERLEAVE_HIGH_WORDS(a[255:128], b[255:128])
+FOR j := 0 to 15
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := tmp_dst[i+15:i]
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPUNPCKHWD" form="ymm {z}, ymm, ymm" xed="VPUNPCKHWD_YMMu16_MASKmskw_YMMu16_YMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_unpackhi_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__m512i" varname="src" etype="UI16"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI16"/>
+ <parameter type="__m512i" varname="b" etype="UI16"/>
+ <description>Unpack and interleave 16-bit integers from the high half of each 128-bit lane in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE INTERLEAVE_HIGH_WORDS(src1[127:0], src2[127:0]) {
+ dst[15:0] := src1[79:64]
+ dst[31:16] := src2[79:64]
+ dst[47:32] := src1[95:80]
+ dst[63:48] := src2[95:80]
+ dst[79:64] := src1[111:96]
+ dst[95:80] := src2[111:96]
+ dst[111:96] := src1[127:112]
+ dst[127:112] := src2[127:112]
+ RETURN dst[127:0]
+}
+tmp_dst[127:0] := INTERLEAVE_HIGH_WORDS(a[127:0], b[127:0])
+tmp_dst[255:128] := INTERLEAVE_HIGH_WORDS(a[255:128], b[255:128])
+tmp_dst[383:256] := INTERLEAVE_HIGH_WORDS(a[383:256], b[383:256])
+tmp_dst[511:384] := INTERLEAVE_HIGH_WORDS(a[511:384], b[511:384])
+FOR j := 0 to 31
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := tmp_dst[i+15:i]
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPUNPCKHWD" form="zmm {k}, zmm, zmm" xed="VPUNPCKHWD_ZMMu16_MASKmskw_ZMMu16_ZMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_unpackhi_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI16"/>
+ <parameter type="__m512i" varname="b" etype="UI16"/>
+ <description>Unpack and interleave 16-bit integers from the high half of each 128-bit lane in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE INTERLEAVE_HIGH_WORDS(src1[127:0], src2[127:0]) {
+ dst[15:0] := src1[79:64]
+ dst[31:16] := src2[79:64]
+ dst[47:32] := src1[95:80]
+ dst[63:48] := src2[95:80]
+ dst[79:64] := src1[111:96]
+ dst[95:80] := src2[111:96]
+ dst[111:96] := src1[127:112]
+ dst[127:112] := src2[127:112]
+ RETURN dst[127:0]
+}
+tmp_dst[127:0] := INTERLEAVE_HIGH_WORDS(a[127:0], b[127:0])
+tmp_dst[255:128] := INTERLEAVE_HIGH_WORDS(a[255:128], b[255:128])
+tmp_dst[383:256] := INTERLEAVE_HIGH_WORDS(a[383:256], b[383:256])
+tmp_dst[511:384] := INTERLEAVE_HIGH_WORDS(a[511:384], b[511:384])
+FOR j := 0 to 31
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := tmp_dst[i+15:i]
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPUNPCKHWD" form="zmm {z}, zmm, zmm" xed="VPUNPCKHWD_ZMMu16_MASKmskw_ZMMu16_ZMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_unpackhi_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__m512i" varname="a" etype="UI16"/>
+ <parameter type="__m512i" varname="b" etype="UI16"/>
+ <description>Unpack and interleave 16-bit integers from the high half of each 128-bit lane in "a" and "b", and store the results in "dst".</description>
+ <operation>
+DEFINE INTERLEAVE_HIGH_WORDS(src1[127:0], src2[127:0]) {
+ dst[15:0] := src1[79:64]
+ dst[31:16] := src2[79:64]
+ dst[47:32] := src1[95:80]
+ dst[63:48] := src2[95:80]
+ dst[79:64] := src1[111:96]
+ dst[95:80] := src2[111:96]
+ dst[111:96] := src1[127:112]
+ dst[127:112] := src2[127:112]
+ RETURN dst[127:0]
+}
+dst[127:0] := INTERLEAVE_HIGH_WORDS(a[127:0], b[127:0])
+dst[255:128] := INTERLEAVE_HIGH_WORDS(a[255:128], b[255:128])
+dst[383:256] := INTERLEAVE_HIGH_WORDS(a[383:256], b[383:256])
+dst[511:384] := INTERLEAVE_HIGH_WORDS(a[511:384], b[511:384])
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPUNPCKHWD" form="zmm, zmm, zmm" xed="VPUNPCKHWD_ZMMu16_MASKmskw_ZMMu16_ZMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_unpackhi_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__m128i" varname="src" etype="UI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <parameter type="__m128i" varname="b" etype="UI16"/>
+ <description>Unpack and interleave 16-bit integers from the high half of "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE INTERLEAVE_HIGH_WORDS(src1[127:0], src2[127:0]) {
+ dst[15:0] := src1[79:64]
+ dst[31:16] := src2[79:64]
+ dst[47:32] := src1[95:80]
+ dst[63:48] := src2[95:80]
+ dst[79:64] := src1[111:96]
+ dst[95:80] := src2[111:96]
+ dst[111:96] := src1[127:112]
+ dst[127:112] := src2[127:112]
+ RETURN dst[127:0]
+}
+tmp_dst[127:0] := INTERLEAVE_HIGH_WORDS(a[127:0], b[127:0])
+FOR j := 0 to 7
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := tmp_dst[i+15:i]
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPUNPCKHWD" form="xmm {k}, xmm, xmm" xed="VPUNPCKHWD_XMMu16_MASKmskw_XMMu16_XMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_unpackhi_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <parameter type="__m128i" varname="b" etype="UI16"/>
+ <description>Unpack and interleave 16-bit integers from the high half of "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE INTERLEAVE_HIGH_WORDS(src1[127:0], src2[127:0]) {
+ dst[15:0] := src1[79:64]
+ dst[31:16] := src2[79:64]
+ dst[47:32] := src1[95:80]
+ dst[63:48] := src2[95:80]
+ dst[79:64] := src1[111:96]
+ dst[95:80] := src2[111:96]
+ dst[111:96] := src1[127:112]
+ dst[127:112] := src2[127:112]
+ RETURN dst[127:0]
+}
+tmp_dst[127:0] := INTERLEAVE_HIGH_WORDS(a[127:0], b[127:0])
+FOR j := 0 to 7
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := tmp_dst[i+15:i]
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPUNPCKHWD" form="xmm {z}, xmm, xmm" xed="VPUNPCKHWD_XMMu16_MASKmskw_XMMu16_XMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_unpacklo_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256i" varname="dst" etype="UI8"/>
+ <parameter type="__m256i" varname="src" etype="UI8"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI8"/>
+ <parameter type="__m256i" varname="b" etype="UI8"/>
+ <description>Unpack and interleave 8-bit integers from the low half of each 128-bit lane in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE INTERLEAVE_BYTES(src1[127:0], src2[127:0]) {
+ dst[7:0] := src1[7:0]
+ dst[15:8] := src2[7:0]
+ dst[23:16] := src1[15:8]
+ dst[31:24] := src2[15:8]
+ dst[39:32] := src1[23:16]
+ dst[47:40] := src2[23:16]
+ dst[55:48] := src1[31:24]
+ dst[63:56] := src2[31:24]
+ dst[71:64] := src1[39:32]
+ dst[79:72] := src2[39:32]
+ dst[87:80] := src1[47:40]
+ dst[95:88] := src2[47:40]
+ dst[103:96] := src1[55:48]
+ dst[111:104] := src2[55:48]
+ dst[119:112] := src1[63:56]
+ dst[127:120] := src2[63:56]
+ RETURN dst[127:0]
+}
+tmp_dst[127:0] := INTERLEAVE_BYTES(a[127:0], b[127:0])
+tmp_dst[255:128] := INTERLEAVE_BYTES(a[255:128], b[255:128])
+FOR j := 0 to 31
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := tmp_dst[i+7:i]
+ ELSE
+ dst[i+7:i] := src[i+7:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPUNPCKLBW" form="ymm {k}, ymm, ymm" xed="VPUNPCKLBW_YMMu8_MASKmskw_YMMu8_YMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_unpacklo_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256i" varname="dst" etype="UI8"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI8"/>
+ <parameter type="__m256i" varname="b" etype="UI8"/>
+ <description>Unpack and interleave 8-bit integers from the low half of each 128-bit lane in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE INTERLEAVE_BYTES(src1[127:0], src2[127:0]) {
+ dst[7:0] := src1[7:0]
+ dst[15:8] := src2[7:0]
+ dst[23:16] := src1[15:8]
+ dst[31:24] := src2[15:8]
+ dst[39:32] := src1[23:16]
+ dst[47:40] := src2[23:16]
+ dst[55:48] := src1[31:24]
+ dst[63:56] := src2[31:24]
+ dst[71:64] := src1[39:32]
+ dst[79:72] := src2[39:32]
+ dst[87:80] := src1[47:40]
+ dst[95:88] := src2[47:40]
+ dst[103:96] := src1[55:48]
+ dst[111:104] := src2[55:48]
+ dst[119:112] := src1[63:56]
+ dst[127:120] := src2[63:56]
+ RETURN dst[127:0]
+}
+tmp_dst[127:0] := INTERLEAVE_BYTES(a[127:0], b[127:0])
+tmp_dst[255:128] := INTERLEAVE_BYTES(a[255:128], b[255:128])
+FOR j := 0 to 31
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := tmp_dst[i+7:i]
+ ELSE
+ dst[i+7:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPUNPCKLBW" form="ymm {z}, ymm, ymm" xed="VPUNPCKLBW_YMMu8_MASKmskw_YMMu8_YMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_unpacklo_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512i" varname="dst" etype="UI8"/>
+ <parameter type="__m512i" varname="src" etype="UI8"/>
+ <parameter type="__mmask64" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI8"/>
+ <parameter type="__m512i" varname="b" etype="UI8"/>
+ <description>Unpack and interleave 8-bit integers from the low half of each 128-bit lane in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE INTERLEAVE_BYTES(src1[127:0], src2[127:0]) {
+ dst[7:0] := src1[7:0]
+ dst[15:8] := src2[7:0]
+ dst[23:16] := src1[15:8]
+ dst[31:24] := src2[15:8]
+ dst[39:32] := src1[23:16]
+ dst[47:40] := src2[23:16]
+ dst[55:48] := src1[31:24]
+ dst[63:56] := src2[31:24]
+ dst[71:64] := src1[39:32]
+ dst[79:72] := src2[39:32]
+ dst[87:80] := src1[47:40]
+ dst[95:88] := src2[47:40]
+ dst[103:96] := src1[55:48]
+ dst[111:104] := src2[55:48]
+ dst[119:112] := src1[63:56]
+ dst[127:120] := src2[63:56]
+ RETURN dst[127:0]
+}
+tmp_dst[127:0] := INTERLEAVE_BYTES(a[127:0], b[127:0])
+tmp_dst[255:128] := INTERLEAVE_BYTES(a[255:128], b[255:128])
+tmp_dst[383:256] := INTERLEAVE_BYTES(a[383:256], b[383:256])
+tmp_dst[511:384] := INTERLEAVE_BYTES(a[511:384], b[511:384])
+FOR j := 0 to 63
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := tmp_dst[i+7:i]
+ ELSE
+ dst[i+7:i] := src[i+7:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPUNPCKLBW" form="zmm {k}, zmm, zmm" xed="VPUNPCKLBW_ZMMu8_MASKmskw_ZMMu8_ZMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_unpacklo_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512i" varname="dst" etype="UI8"/>
+ <parameter type="__mmask64" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI8"/>
+ <parameter type="__m512i" varname="b" etype="UI8"/>
+ <description>Unpack and interleave 8-bit integers from the low half of each 128-bit lane in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE INTERLEAVE_BYTES(src1[127:0], src2[127:0]) {
+ dst[7:0] := src1[7:0]
+ dst[15:8] := src2[7:0]
+ dst[23:16] := src1[15:8]
+ dst[31:24] := src2[15:8]
+ dst[39:32] := src1[23:16]
+ dst[47:40] := src2[23:16]
+ dst[55:48] := src1[31:24]
+ dst[63:56] := src2[31:24]
+ dst[71:64] := src1[39:32]
+ dst[79:72] := src2[39:32]
+ dst[87:80] := src1[47:40]
+ dst[95:88] := src2[47:40]
+ dst[103:96] := src1[55:48]
+ dst[111:104] := src2[55:48]
+ dst[119:112] := src1[63:56]
+ dst[127:120] := src2[63:56]
+ RETURN dst[127:0]
+}
+tmp_dst[127:0] := INTERLEAVE_BYTES(a[127:0], b[127:0])
+tmp_dst[255:128] := INTERLEAVE_BYTES(a[255:128], b[255:128])
+tmp_dst[383:256] := INTERLEAVE_BYTES(a[383:256], b[383:256])
+tmp_dst[511:384] := INTERLEAVE_BYTES(a[511:384], b[511:384])
+FOR j := 0 to 63
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := tmp_dst[i+7:i]
+ ELSE
+ dst[i+7:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPUNPCKLBW" form="zmm {z}, zmm, zmm" xed="VPUNPCKLBW_ZMMu8_MASKmskw_ZMMu8_ZMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_unpacklo_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512i" varname="dst" etype="UI8"/>
+ <parameter type="__m512i" varname="a" etype="UI8"/>
+ <parameter type="__m512i" varname="b" etype="UI8"/>
+ <description>Unpack and interleave 8-bit integers from the low half of each 128-bit lane in "a" and "b", and store the results in "dst".</description>
+ <operation>
+DEFINE INTERLEAVE_BYTES(src1[127:0], src2[127:0]) {
+ dst[7:0] := src1[7:0]
+ dst[15:8] := src2[7:0]
+ dst[23:16] := src1[15:8]
+ dst[31:24] := src2[15:8]
+ dst[39:32] := src1[23:16]
+ dst[47:40] := src2[23:16]
+ dst[55:48] := src1[31:24]
+ dst[63:56] := src2[31:24]
+ dst[71:64] := src1[39:32]
+ dst[79:72] := src2[39:32]
+ dst[87:80] := src1[47:40]
+ dst[95:88] := src2[47:40]
+ dst[103:96] := src1[55:48]
+ dst[111:104] := src2[55:48]
+ dst[119:112] := src1[63:56]
+ dst[127:120] := src2[63:56]
+ RETURN dst[127:0]
+}
+dst[127:0] := INTERLEAVE_BYTES(a[127:0], b[127:0])
+dst[255:128] := INTERLEAVE_BYTES(a[255:128], b[255:128])
+dst[383:256] := INTERLEAVE_BYTES(a[383:256], b[383:256])
+dst[511:384] := INTERLEAVE_BYTES(a[511:384], b[511:384])
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPUNPCKLBW" form="zmm, zmm, zmm" xed="VPUNPCKLBW_ZMMu8_MASKmskw_ZMMu8_ZMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_unpacklo_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__m128i" varname="src" etype="UI8"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
+ <parameter type="__m128i" varname="b" etype="UI8"/>
+ <description>Unpack and interleave 8-bit integers from the low half of "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE INTERLEAVE_BYTES(src1[127:0], src2[127:0]) {
+ dst[7:0] := src1[7:0]
+ dst[15:8] := src2[7:0]
+ dst[23:16] := src1[15:8]
+ dst[31:24] := src2[15:8]
+ dst[39:32] := src1[23:16]
+ dst[47:40] := src2[23:16]
+ dst[55:48] := src1[31:24]
+ dst[63:56] := src2[31:24]
+ dst[71:64] := src1[39:32]
+ dst[79:72] := src2[39:32]
+ dst[87:80] := src1[47:40]
+ dst[95:88] := src2[47:40]
+ dst[103:96] := src1[55:48]
+ dst[111:104] := src2[55:48]
+ dst[119:112] := src1[63:56]
+ dst[127:120] := src2[63:56]
+ RETURN dst[127:0]
+}
+tmp_dst[127:0] := INTERLEAVE_BYTES(a[127:0], b[127:0])
+FOR j := 0 to 15
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := tmp_dst[i+7:i]
+ ELSE
+ dst[i+7:i] := src[i+7:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPUNPCKLBW" form="xmm {k}, xmm, xmm" xed="VPUNPCKLBW_XMMu8_MASKmskw_XMMu8_XMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_unpacklo_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
+ <parameter type="__m128i" varname="b" etype="UI8"/>
+ <description>Unpack and interleave 8-bit integers from the low half of "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE INTERLEAVE_BYTES(src1[127:0], src2[127:0]) {
+ dst[7:0] := src1[7:0]
+ dst[15:8] := src2[7:0]
+ dst[23:16] := src1[15:8]
+ dst[31:24] := src2[15:8]
+ dst[39:32] := src1[23:16]
+ dst[47:40] := src2[23:16]
+ dst[55:48] := src1[31:24]
+ dst[63:56] := src2[31:24]
+ dst[71:64] := src1[39:32]
+ dst[79:72] := src2[39:32]
+ dst[87:80] := src1[47:40]
+ dst[95:88] := src2[47:40]
+ dst[103:96] := src1[55:48]
+ dst[111:104] := src2[55:48]
+ dst[119:112] := src1[63:56]
+ dst[127:120] := src2[63:56]
+ RETURN dst[127:0]
+}
+tmp_dst[127:0] := INTERLEAVE_BYTES(a[127:0], b[127:0])
+FOR j := 0 to 15
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := tmp_dst[i+7:i]
+ ELSE
+ dst[i+7:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPUNPCKLBW" form="xmm {z}, xmm, xmm" xed="VPUNPCKLBW_XMMu8_MASKmskw_XMMu8_XMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_unpacklo_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__m256i" varname="src" etype="UI16"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI16"/>
+ <parameter type="__m256i" varname="b" etype="UI16"/>
+ <description>Unpack and interleave 16-bit integers from the low half of each 128-bit lane in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE INTERLEAVE_WORDS(src1[127:0], src2[127:0]) {
+ dst[15:0] := src1[15:0]
+ dst[31:16] := src2[15:0]
+ dst[47:32] := src1[31:16]
+ dst[63:48] := src2[31:16]
+ dst[79:64] := src1[47:32]
+ dst[95:80] := src2[47:32]
+ dst[111:96] := src1[63:48]
+ dst[127:112] := src2[63:48]
+ RETURN dst[127:0]
+}
+tmp_dst[127:0] := INTERLEAVE_WORDS(a[127:0], b[127:0])
+tmp_dst[255:128] := INTERLEAVE_WORDS(a[255:128], b[255:128])
+FOR j := 0 to 15
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := tmp_dst[i+15:i]
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPUNPCKLWD" form="ymm {k}, ymm, ymm" xed="VPUNPCKLWD_YMMu16_MASKmskw_YMMu16_YMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_unpacklo_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI16"/>
+ <parameter type="__m256i" varname="b" etype="UI16"/>
+ <description>Unpack and interleave 16-bit integers from the low half of each 128-bit lane in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE INTERLEAVE_WORDS(src1[127:0], src2[127:0]) {
+ dst[15:0] := src1[15:0]
+ dst[31:16] := src2[15:0]
+ dst[47:32] := src1[31:16]
+ dst[63:48] := src2[31:16]
+ dst[79:64] := src1[47:32]
+ dst[95:80] := src2[47:32]
+ dst[111:96] := src1[63:48]
+ dst[127:112] := src2[63:48]
+ RETURN dst[127:0]
+}
+tmp_dst[127:0] := INTERLEAVE_WORDS(a[127:0], b[127:0])
+tmp_dst[255:128] := INTERLEAVE_WORDS(a[255:128], b[255:128])
+FOR j := 0 to 15
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := tmp_dst[i+15:i]
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPUNPCKLWD" form="ymm {z}, ymm, ymm" xed="VPUNPCKLWD_YMMu16_MASKmskw_YMMu16_YMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_unpacklo_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__m512i" varname="src" etype="UI16"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI16"/>
+ <parameter type="__m512i" varname="b" etype="UI16"/>
+ <description>Unpack and interleave 16-bit integers from the low half of each 128-bit lane in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE INTERLEAVE_WORDS(src1[127:0], src2[127:0]) {
+ dst[15:0] := src1[15:0]
+ dst[31:16] := src2[15:0]
+ dst[47:32] := src1[31:16]
+ dst[63:48] := src2[31:16]
+ dst[79:64] := src1[47:32]
+ dst[95:80] := src2[47:32]
+ dst[111:96] := src1[63:48]
+ dst[127:112] := src2[63:48]
+ RETURN dst[127:0]
+}
+tmp_dst[127:0] := INTERLEAVE_WORDS(a[127:0], b[127:0])
+tmp_dst[255:128] := INTERLEAVE_WORDS(a[255:128], b[255:128])
+tmp_dst[383:256] := INTERLEAVE_WORDS(a[383:256], b[383:256])
+tmp_dst[511:384] := INTERLEAVE_WORDS(a[511:384], b[511:384])
+FOR j := 0 to 31
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := tmp_dst[i+15:i]
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPUNPCKLWD" form="zmm {k}, zmm, zmm" xed="VPUNPCKLWD_ZMMu16_MASKmskw_ZMMu16_ZMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_unpacklo_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI16"/>
+ <parameter type="__m512i" varname="b" etype="UI16"/>
+ <description>Unpack and interleave 16-bit integers from the low half of each 128-bit lane in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE INTERLEAVE_WORDS(src1[127:0], src2[127:0]) {
+ dst[15:0] := src1[15:0]
+ dst[31:16] := src2[15:0]
+ dst[47:32] := src1[31:16]
+ dst[63:48] := src2[31:16]
+ dst[79:64] := src1[47:32]
+ dst[95:80] := src2[47:32]
+ dst[111:96] := src1[63:48]
+ dst[127:112] := src2[63:48]
+ RETURN dst[127:0]
+}
+tmp_dst[127:0] := INTERLEAVE_WORDS(a[127:0], b[127:0])
+tmp_dst[255:128] := INTERLEAVE_WORDS(a[255:128], b[255:128])
+tmp_dst[383:256] := INTERLEAVE_WORDS(a[383:256], b[383:256])
+tmp_dst[511:384] := INTERLEAVE_WORDS(a[511:384], b[511:384])
+FOR j := 0 to 31
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := tmp_dst[i+15:i]
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPUNPCKLWD" form="zmm {z}, zmm, zmm" xed="VPUNPCKLWD_ZMMu16_MASKmskw_ZMMu16_ZMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_unpacklo_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__m512i" varname="a" etype="UI16"/>
+ <parameter type="__m512i" varname="b" etype="UI16"/>
+ <description>Unpack and interleave 16-bit integers from the low half of each 128-bit lane in "a" and "b", and store the results in "dst".</description>
+ <operation>
+DEFINE INTERLEAVE_WORDS(src1[127:0], src2[127:0]) {
+ dst[15:0] := src1[15:0]
+ dst[31:16] := src2[15:0]
+ dst[47:32] := src1[31:16]
+ dst[63:48] := src2[31:16]
+ dst[79:64] := src1[47:32]
+ dst[95:80] := src2[47:32]
+ dst[111:96] := src1[63:48]
+ dst[127:112] := src2[63:48]
+ RETURN dst[127:0]
+}
+dst[127:0] := INTERLEAVE_WORDS(a[127:0], b[127:0])
+dst[255:128] := INTERLEAVE_WORDS(a[255:128], b[255:128])
+dst[383:256] := INTERLEAVE_WORDS(a[383:256], b[383:256])
+dst[511:384] := INTERLEAVE_WORDS(a[511:384], b[511:384])
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPUNPCKLWD" form="zmm, zmm, zmm" xed="VPUNPCKLWD_ZMMu16_MASKmskw_ZMMu16_ZMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_unpacklo_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__m128i" varname="src" etype="UI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <parameter type="__m128i" varname="b" etype="UI16"/>
+ <description>Unpack and interleave 16-bit integers from the low half of "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE INTERLEAVE_WORDS(src1[127:0], src2[127:0]) {
+ dst[15:0] := src1[15:0]
+ dst[31:16] := src2[15:0]
+ dst[47:32] := src1[31:16]
+ dst[63:48] := src2[31:16]
+ dst[79:64] := src1[47:32]
+ dst[95:80] := src2[47:32]
+ dst[111:96] := src1[63:48]
+ dst[127:112] := src2[63:48]
+ RETURN dst[127:0]
+}
+tmp_dst[127:0] := INTERLEAVE_WORDS(a[127:0], b[127:0])
+FOR j := 0 to 7
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := tmp_dst[i+15:i]
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPUNPCKLWD" form="xmm {k}, xmm, xmm" xed="VPUNPCKLWD_XMMu16_MASKmskw_XMMu16_XMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_unpacklo_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <parameter type="__m128i" varname="b" etype="UI16"/>
+ <description>Unpack and interleave 16-bit integers from the low half of "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE INTERLEAVE_WORDS(src1[127:0], src2[127:0]) {
+ dst[15:0] := src1[15:0]
+ dst[31:16] := src2[15:0]
+ dst[47:32] := src1[31:16]
+ dst[63:48] := src2[31:16]
+ dst[79:64] := src1[47:32]
+ dst[95:80] := src2[47:32]
+ dst[111:96] := src1[63:48]
+ dst[127:112] := src2[63:48]
+ RETURN dst[127:0]
+}
+tmp_dst[127:0] := INTERLEAVE_WORDS(a[127:0], b[127:0])
+FOR j := 0 to 7
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := tmp_dst[i+15:i]
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPUNPCKLWD" form="xmm {z}, xmm, xmm" xed="VPUNPCKLWD_XMMu16_MASKmskw_XMMu16_XMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_storeu_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="mem_addr" etype="UI16" memwidth="512"/>
+ <parameter type="__m512i" varname="a" etype="UI16"/>
+ <description>Store 512-bits (composed of 32 packed 16-bit integers) from "a" into memory.
+ "mem_addr" does not need to be aligned on any particular boundary.</description>
+ <operation>
+MEM[mem_addr+511:mem_addr] := a[511:0]
+ </operation>
+ <instruction name="VMOVDQU16" form="m512, zmm" xed="VMOVDQU16_MEMu16_MASKmskw_ZMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_storeu_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="mem_addr" etype="UI8" memwidth="512"/>
+ <parameter type="__m512i" varname="a" etype="UI8"/>
+ <description>Store 512-bits (composed of 64 packed 8-bit integers) from "a" into memory.
+ "mem_addr" does not need to be aligned on any particular boundary.</description>
+ <operation>
+MEM[mem_addr+511:mem_addr] := a[511:0]
+ </operation>
+ <instruction name="VMOVDQU8" form="m512, zmm" xed="VMOVDQU8_MEMu8_MASKmskw_ZMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_storeu_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="mem_addr" etype="UI16" memwidth="256"/>
+ <parameter type="__m256i" varname="a" etype="UI16"/>
+ <description>Store 256-bits (composed of 16 packed 16-bit integers) from "a" into memory.
+ "mem_addr" does not need to be aligned on any particular boundary.</description>
+ <operation>
+MEM[mem_addr+255:mem_addr] := a[255:0]
+ </operation>
+ <instruction name="VMOVDQU16" form="m256, ymm" xed="VMOVDQU16_MEMu16_MASKmskw_YMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_storeu_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="mem_addr" etype="UI8" memwidth="256"/>
+ <parameter type="__m256i" varname="a" etype="UI8"/>
+ <description>Store 256-bits (composed of 32 packed 8-bit integers) from "a" into memory.
+ "mem_addr" does not need to be aligned on any particular boundary.</description>
+ <operation>
+MEM[mem_addr+255:mem_addr] := a[255:0]
+ </operation>
+ <instruction name="VMOVDQU8" form="m256, ymm" xed="VMOVDQU8_MEMu8_MASKmskw_YMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_storeu_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="mem_addr" etype="UI16" memwidth="128"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <description>Store 128-bits (composed of 8 packed 16-bit integers) from "a" into memory.
+ "mem_addr" does not need to be aligned on any particular boundary.</description>
+ <operation>
+MEM[mem_addr+127:mem_addr] := a[127:0]
+ </operation>
+ <instruction name="VMOVDQU16" form="m128, xmm" xed="VMOVDQU16_MEMu16_MASKmskw_XMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_storeu_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="mem_addr" etype="UI8" memwidth="128"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
+ <description>Store 128-bits (composed of 16 packed 8-bit integers) from "a" into memory.
+ "mem_addr" does not need to be aligned on any particular boundary.</description>
+ <operation>
+MEM[mem_addr+127:mem_addr] := a[127:0]
+ </operation>
+ <instruction name="VMOVDQU8" form="m128, xmm" xed="VMOVDQU8_MEMu8_MASKmskw_XMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_loadu_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Load</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="void const*" varname="mem_addr" etype="UI16" memwidth="512"/>
+ <description>Load 512-bits (composed of 32 packed 16-bit integers) from memory into "dst".
+ "mem_addr" does not need to be aligned on any particular boundary.</description>
+ <operation>
+dst[511:0] := MEM[mem_addr+511:mem_addr]
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VMOVDQU16" form="zmm, m512" xed="VMOVDQU16_ZMMu16_MASKmskw_MEMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_loadu_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Load</category>
+ <return type="__m512i" varname="dst" etype="UI8"/>
+ <parameter type="void const*" varname="mem_addr" etype="UI8" memwidth="512"/>
+ <description>Load 512-bits (composed of 64 packed 8-bit integers) from memory into "dst".
+ "mem_addr" does not need to be aligned on any particular boundary.</description>
+ <operation>
+dst[511:0] := MEM[mem_addr+511:mem_addr]
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VMOVDQU8" form="zmm, m512" xed="VMOVDQU8_ZMMu8_MASKmskw_MEMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_loadu_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Load</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="void const*" varname="mem_addr" etype="UI16" memwidth="256"/>
+ <description>Load 256-bits (composed of 16 packed 16-bit integers) from memory into "dst".
+ "mem_addr" does not need to be aligned on any particular boundary.</description>
+ <operation>
+dst[255:0] := MEM[mem_addr+255:mem_addr]
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VMOVDQU16" form="ymm, m256" xed="VMOVDQU16_YMMu16_MASKmskw_MEMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_loadu_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Load</category>
+ <return type="__m256i" varname="dst" etype="UI8"/>
+ <parameter type="void const*" varname="mem_addr" etype="UI8" memwidth="256"/>
+ <description>Load 256-bits (composed of 32 packed 8-bit integers) from memory into "dst".
+ "mem_addr" does not need to be aligned on any particular boundary.</description>
+ <operation>
+dst[255:0] := MEM[mem_addr+255:mem_addr]
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VMOVDQU8" form="ymm, m256" xed="VMOVDQU8_YMMu8_MASKmskw_MEMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_loadu_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Load</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="void const*" varname="mem_addr" etype="UI16" memwidth="128"/>
+ <description>Load 128-bits (composed of 8 packed 16-bit integers) from memory into "dst".
+ "mem_addr" does not need to be aligned on any particular boundary.</description>
+ <operation>
+dst[127:0] := MEM[mem_addr+127:mem_addr]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VMOVDQU16" form="xmm, m128" xed="VMOVDQU16_XMMu16_MASKmskw_MEMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_loadu_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512BW</CPUID>
+ <category>Load</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="void const*" varname="mem_addr" etype="UI8" memwidth="128"/>
+ <description>Load 128-bits (composed of 16 packed 8-bit integers) from memory into "dst".
+ "mem_addr" does not need to be aligned on any particular boundary.</description>
+ <operation>
+dst[127:0] := MEM[mem_addr+127:mem_addr]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VMOVDQU8" form="xmm, m128" xed="VMOVDQU8_XMMu8_MASKmskw_MEMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_kadd_mask32">
+ <type>Mask</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Mask</category>
+ <return type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__mmask32" varname="a" etype="MASK"/>
+ <parameter type="__mmask32" varname="b" etype="MASK"/>
+ <description>Add 32-bit masks in "a" and "b", and store the result in "k".</description>
+ <operation>
+k[31:0] := a[31:0] + b[31:0]
+k[MAX:32] := 0
+ </operation>
+ <instruction name="KADDD" form="k, k, k" xed="KADDD_MASKmskw_MASKmskw_MASKmskw_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_kadd_mask64">
+ <type>Mask</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Mask</category>
+ <return type="__mmask64" varname="k" etype="MASK"/>
+ <parameter type="__mmask64" varname="a" etype="MASK"/>
+ <parameter type="__mmask64" varname="b" etype="MASK"/>
+ <description>Add 64-bit masks in "a" and "b", and store the result in "k".</description>
+ <operation>
+k[63:0] := a[63:0] + b[63:0]
+k[MAX:64] := 0
+ </operation>
+ <instruction name="KADDQ" form="k, k, k" xed="KADDQ_MASKmskw_MASKmskw_MASKmskw_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_kand_mask32">
+ <type>Mask</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Mask</category>
+ <return type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__mmask32" varname="a" etype="MASK"/>
+ <parameter type="__mmask32" varname="b" etype="MASK"/>
+ <description>Compute the bitwise AND of 32-bit masks "a" and "b", and store the result in "k".</description>
+ <operation>
+k[31:0] := a[31:0] AND b[31:0]
+k[MAX:32] := 0
+ </operation>
+ <instruction name="KANDD" form="k, k, k" xed="KANDD_MASKmskw_MASKmskw_MASKmskw_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_kand_mask64">
+ <type>Mask</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Mask</category>
+ <return type="__mmask64" varname="k" etype="MASK"/>
+ <parameter type="__mmask64" varname="a" etype="MASK"/>
+ <parameter type="__mmask64" varname="b" etype="MASK"/>
+ <description>Compute the bitwise AND of 64-bit masks "a" and "b", and store the result in "k".</description>
+ <operation>
+k[63:0] := a[63:0] AND b[63:0]
+k[MAX:64] := 0
+ </operation>
+ <instruction name="KANDQ" form="k, k, k" xed="KANDQ_MASKmskw_MASKmskw_MASKmskw_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_kandn_mask32">
+ <type>Mask</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Mask</category>
+ <return type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__mmask32" varname="a" etype="MASK"/>
+ <parameter type="__mmask32" varname="b" etype="MASK"/>
+ <description>Compute the bitwise NOT of 32-bit masks "a" and then AND with "b", and store the result in "k".</description>
+ <operation>
+k[31:0] := (NOT a[31:0]) AND b[31:0]
+k[MAX:32] := 0
+ </operation>
+ <instruction name="KANDND" form="k, k, k" xed="KANDND_MASKmskw_MASKmskw_MASKmskw_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_kandn_mask64">
+ <type>Mask</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Mask</category>
+ <return type="__mmask64" varname="k" etype="MASK"/>
+ <parameter type="__mmask64" varname="a" etype="MASK"/>
+ <parameter type="__mmask64" varname="b" etype="MASK"/>
+ <description>Compute the bitwise NOT of 64-bit masks "a" and then AND with "b", and store the result in "k".</description>
+ <operation>
+k[63:0] := (NOT a[63:0]) AND b[63:0]
+k[MAX:64] := 0
+ </operation>
+ <instruction name="KANDNQ" form="k, k, k" xed="KANDNQ_MASKmskw_MASKmskw_MASKmskw_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_knot_mask32">
+ <type>Mask</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Mask</category>
+ <return type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__mmask32" varname="a" etype="MASK"/>
+ <description>Compute the bitwise NOT of 32-bit mask "a", and store the result in "k".</description>
+ <operation>
+k[31:0] := NOT a[31:0]
+k[MAX:32] := 0
+ </operation>
+ <instruction name="KNOTD" form="k, k" xed="KNOTD_MASKmskw_MASKmskw_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_knot_mask64">
+ <type>Mask</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Mask</category>
+ <return type="__mmask64" varname="k" etype="MASK"/>
+ <parameter type="__mmask64" varname="a" etype="MASK"/>
+ <description>Compute the bitwise NOT of 64-bit mask "a", and store the result in "k".</description>
+ <operation>
+k[63:0] := NOT a[63:0]
+k[MAX:64] := 0
+ </operation>
+ <instruction name="KNOTQ" form="k, k" xed="KNOTQ_MASKmskw_MASKmskw_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_kor_mask32">
+ <type>Mask</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Mask</category>
+ <return type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__mmask32" varname="a" etype="MASK"/>
+ <parameter type="__mmask32" varname="b" etype="MASK"/>
+ <description>Compute the bitwise OR of 32-bit masks "a" and "b", and store the result in "k".</description>
+ <operation>
+k[31:0] := a[31:0] OR b[31:0]
+k[MAX:32] := 0
+ </operation>
+ <instruction name="KORD" form="k, k, k" xed="KORD_MASKmskw_MASKmskw_MASKmskw_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_kor_mask64">
+ <type>Mask</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Mask</category>
+ <return type="__mmask64" varname="k" etype="MASK"/>
+ <parameter type="__mmask64" varname="a" etype="MASK"/>
+ <parameter type="__mmask64" varname="b" etype="MASK"/>
+ <description>Compute the bitwise OR of 64-bit masks "a" and "b", and store the result in "k".</description>
+ <operation>
+k[63:0] := a[63:0] OR b[63:0]
+k[MAX:64] := 0
+ </operation>
+ <instruction name="KORQ" form="k, k, k" xed="KORQ_MASKmskw_MASKmskw_MASKmskw_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_kxnor_mask32">
+ <type>Mask</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Mask</category>
+ <return type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__mmask32" varname="a" etype="MASK"/>
+ <parameter type="__mmask32" varname="b" etype="MASK"/>
+ <description>Compute the bitwise XNOR of 32-bit masks "a" and "b", and store the result in "k".</description>
+ <operation>
+k[31:0] := NOT (a[31:0] XOR b[31:0])
+k[MAX:32] := 0
+ </operation>
+ <instruction name="KXNORD" form="k, k, k" xed="KXNORD_MASKmskw_MASKmskw_MASKmskw_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_kxnor_mask64">
+ <type>Mask</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Mask</category>
+ <return type="__mmask64" varname="k" etype="MASK"/>
+ <parameter type="__mmask64" varname="a" etype="MASK"/>
+ <parameter type="__mmask64" varname="b" etype="MASK"/>
+ <description>Compute the bitwise XNOR of 64-bit masks "a" and "b", and store the result in "k".</description>
+ <operation>
+k[63:0] := NOT (a[63:0] XOR b[63:0])
+k[MAX:64] := 0
+ </operation>
+ <instruction name="KXNORQ" form="k, k, k" xed="KXNORQ_MASKmskw_MASKmskw_MASKmskw_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_kxor_mask32">
+ <type>Mask</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Mask</category>
+ <return type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__mmask32" varname="a" etype="MASK"/>
+ <parameter type="__mmask32" varname="b" etype="MASK"/>
+ <description>Compute the bitwise XOR of 32-bit masks "a" and "b", and store the result in "k".</description>
+ <operation>
+k[31:0] := a[31:0] XOR b[31:0]
+k[MAX:32] := 0
+ </operation>
+ <instruction name="KXORD" form="k, k, k" xed="KXORD_MASKmskw_MASKmskw_MASKmskw_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_kxor_mask64">
+ <type>Mask</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Mask</category>
+ <return type="__mmask64" varname="k" etype="MASK"/>
+ <parameter type="__mmask64" varname="a" etype="MASK"/>
+ <parameter type="__mmask64" varname="b" etype="MASK"/>
+ <description>Compute the bitwise XOR of 64-bit masks "a" and "b", and store the result in "k".</description>
+ <operation>
+k[63:0] := a[63:0] XOR b[63:0]
+k[MAX:64] := 0
+ </operation>
+ <instruction name="KXORQ" form="k, k, k" xed="KXORQ_MASKmskw_MASKmskw_MASKmskw_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_kshiftli_mask32">
+ <type>Mask</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Mask</category>
+ <return type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__mmask32" varname="a" etype="MASK"/>
+ <parameter type="unsigned int" varname="count" etype="IMM" immwidth="8"/>
+ <description>Shift the bits of 32-bit mask "a" left by "count" while shifting in zeros, and store the least significant 32 bits of the result in "k".</description>
+ <operation>
+k[MAX:0] := 0
+IF count[7:0] &lt;= 31
+ k[31:0] := a[31:0] &lt;&lt; count[7:0]
+FI
+ </operation>
+ <instruction name="KSHIFTLD" form="k, k, imm8" xed="KSHIFTLD_MASKmskw_MASKmskw_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_kshiftli_mask64">
+ <type>Mask</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Mask</category>
+ <return type="__mmask64" varname="k" etype="MASK"/>
+ <parameter type="__mmask64" varname="a" etype="MASK"/>
+ <parameter type="unsigned int" varname="count" etype="IMM" immwidth="8"/>
+ <description>Shift the bits of 64-bit mask "a" left by "count" while shifting in zeros, and store the least significant 64 bits of the result in "k".</description>
+ <operation>
+k[MAX:0] := 0
+IF count[7:0] &lt;= 63
+ k[63:0] := a[63:0] &lt;&lt; count[7:0]
+FI
+ </operation>
+ <instruction name="KSHIFTLQ" form="k, k, imm8" xed="KSHIFTLQ_MASKmskw_MASKmskw_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_kshiftri_mask32">
+ <type>Mask</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Mask</category>
+ <return type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__mmask32" varname="a" etype="MASK"/>
+ <parameter type="unsigned int" varname="count" etype="IMM" immwidth="8"/>
+ <description>Shift the bits of 32-bit mask "a" right by "count" while shifting in zeros, and store the least significant 32 bits of the result in "k".</description>
+ <operation>
+k[MAX:0] := 0
+IF count[7:0] &lt;= 31
+ k[31:0] := a[31:0] &gt;&gt; count[7:0]
+FI
+ </operation>
+ <instruction name="KSHIFTRD" form="k, k, imm8" xed="KSHIFTRD_MASKmskw_MASKmskw_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_kshiftri_mask64">
+ <type>Mask</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Mask</category>
+ <return type="__mmask64" varname="k" etype="MASK"/>
+ <parameter type="__mmask64" varname="a" etype="MASK"/>
+ <parameter type="unsigned int" varname="count" etype="IMM" immwidth="8"/>
+ <description>Shift the bits of 64-bit mask "a" right by "count" while shifting in zeros, and store the least significant 64 bits of the result in "k".</description>
+ <operation>
+k[MAX:0] := 0
+IF count[7:0] &lt;= 63
+ k[63:0] := a[63:0] &gt;&gt; count[7:0]
+FI
+ </operation>
+ <instruction name="KSHIFTRQ" form="k, k, imm8" xed="KSHIFTRQ_MASKmskw_MASKmskw_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_load_mask32">
+ <type>Mask</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Load</category>
+ <return type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__mmask32*" varname="mem_addr" etype="MASK" memwidth="32"/>
+ <description>Load 32-bit mask from memory into "k".</description>
+ <operation>
+k[31:0] := MEM[mem_addr+31:mem_addr]
+ </operation>
+ <instruction name="KMOVD" form="k, m32" xed="KMOVD_MASKmskw_MEMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_load_mask64">
+ <type>Mask</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Load</category>
+ <return type="__mmask64" varname="k" etype="MASK"/>
+ <parameter type="__mmask64*" varname="mem_addr" etype="MASK" memwidth="64"/>
+ <description>Load 64-bit mask from memory into "k".</description>
+ <operation>
+k[63:0] := MEM[mem_addr+63:mem_addr]
+ </operation>
+ <instruction name="KMOVQ" form="k, m64" xed="KMOVQ_MASKmskw_MEMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_store_mask32">
+ <type>Mask</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="__mmask32*" varname="mem_addr" etype="MASK" memwidth="32"/>
+ <parameter type="__mmask32" varname="a" etype="MASK"/>
+ <description>Store 32-bit mask from "a" into memory.</description>
+ <operation>
+MEM[mem_addr+31:mem_addr] := a[31:0]
+ </operation>
+ <instruction name="KMOVD" form="m32, k" xed="KMOVD_MEMu32_MASKmskw_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_store_mask64">
+ <type>Mask</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="__mmask64*" varname="mem_addr" etype="MASK" memwidth="64"/>
+ <parameter type="__mmask64" varname="a" etype="MASK"/>
+ <description>Store 64-bit mask from "a" into memory.</description>
+ <operation>
+MEM[mem_addr+63:mem_addr] := a[63:0]
+ </operation>
+ <instruction name="KMOVQ" form="m64, k" xed="KMOVQ_MEMu64_MASKmskw_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_kortest_mask32_u8">
+ <type>Mask</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Mask</category>
+ <return type="unsigned char" varname="dst" etype="UI8"/>
+ <parameter type="__mmask32" varname="a" etype="MASK"/>
+ <parameter type="__mmask32" varname="b" etype="MASK"/>
+ <parameter type="unsigned char*" varname="all_ones" etype="UI8" memwidth="8"/>
+ <description>Compute the bitwise OR of 32-bit masks "a" and "b". If the result is all zeros, store 1 in "dst", otherwise store 0 in "dst". If the result is all ones, store 1 in "all_ones", otherwise store 0 in "all_ones".</description>
+ <operation>
+tmp[31:0] := a[31:0] OR b[31:0]
+IF tmp[31:0] == 0x0
+ dst := 1
+ELSE
+ dst := 0
+FI
+IF tmp[31:0] == 0xFFFFFFFF
+ MEM[all_ones+7:all_ones] := 1
+ELSE
+ MEM[all_ones+7:all_ones] := 0
+FI
+ </operation>
+ <instruction name="KORTESTD" form="k, k" xed="KORTESTD_MASKmskw_MASKmskw_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_kortestz_mask32_u8">
+ <type>Mask</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Mask</category>
+ <return type="unsigned char" varname="dst" etype="UI8"/>
+ <parameter type="__mmask32" varname="a" etype="MASK"/>
+ <parameter type="__mmask32" varname="b" etype="MASK"/>
+ <description>Compute the bitwise OR of 32-bit masks "a" and "b". If the result is all zeroes, store 1 in "dst", otherwise store 0 in "dst".</description>
+ <operation>
+tmp[31:0] := a[31:0] OR b[31:0]
+IF tmp[31:0] == 0x0
+ dst := 1
+ELSE
+ dst := 0
+FI
+ </operation>
+ <instruction name="KORTESTD" form="k, k" xed="KORTESTD_MASKmskw_MASKmskw_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_kortestc_mask32_u8">
+ <type>Mask</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Mask</category>
+ <return type="unsigned char" varname="dst" etype="UI8"/>
+ <parameter type="__mmask32" varname="a" etype="MASK"/>
+ <parameter type="__mmask32" varname="b" etype="MASK"/>
+ <description>Compute the bitwise OR of 32-bit masks "a" and "b". If the result is all ones, store 1 in "dst", otherwise store 0 in "dst".</description>
+ <operation>
+tmp[31:0] := a[31:0] OR b[31:0]
+IF tmp[31:0] == 0xFFFFFFFF
+ dst := 1
+ELSE
+ dst := 0
+FI
+ </operation>
+ <instruction name="KORTESTD" form="k, k" xed="KORTESTD_MASKmskw_MASKmskw_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_kortest_mask64_u8">
+ <type>Mask</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Mask</category>
+ <return type="unsigned char" varname="dst" etype="UI8"/>
+ <parameter type="__mmask64" varname="a" etype="MASK"/>
+ <parameter type="__mmask64" varname="b" etype="MASK"/>
+ <parameter type="unsigned char*" varname="all_ones" etype="UI8" memwidth="8"/>
+ <description>Compute the bitwise OR of 64-bit masks "a" and "b". If the result is all zeros, store 1 in "dst", otherwise store 0 in "dst". If the result is all ones, store 1 in "all_ones", otherwise store 0 in "all_ones".</description>
+ <operation>
+tmp[63:0] := a[63:0] OR b[63:0]
+IF tmp[63:0] == 0x0
+ dst := 1
+ELSE
+ dst := 0
+FI
+IF tmp[63:0] == 0xFFFFFFFFFFFFFFFF
+ MEM[all_ones+7:all_ones] := 1
+ELSE
+ MEM[all_ones+7:all_ones] := 0
+FI
+ </operation>
+ <instruction name="KORTESTQ" form="k, k" xed="KORTESTQ_MASKmskw_MASKmskw_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_kortestz_mask64_u8">
+ <type>Mask</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Mask</category>
+ <return type="unsigned char" varname="dst" etype="UI8"/>
+ <parameter type="__mmask64" varname="a" etype="MASK"/>
+ <parameter type="__mmask64" varname="b" etype="MASK"/>
+ <description>Compute the bitwise OR of 64-bit masks "a" and "b". If the result is all zeroes, store 1 in "dst", otherwise store 0 in "dst".</description>
+ <operation>
+tmp[63:0] := a[63:0] OR b[63:0]
+IF tmp[63:0] == 0x0
+ dst := 1
+ELSE
+ dst := 0
+FI
+ </operation>
+ <instruction name="KORTESTQ" form="k, k" xed="KORTESTQ_MASKmskw_MASKmskw_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_kortestc_mask64_u8">
+ <type>Mask</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Mask</category>
+ <return type="unsigned char" varname="dst" etype="UI8"/>
+ <parameter type="__mmask64" varname="a" etype="MASK"/>
+ <parameter type="__mmask64" varname="b" etype="MASK"/>
+ <description>Compute the bitwise OR of 64-bit masks "a" and "b". If the result is all ones, store 1 in "dst", otherwise store 0 in "dst".</description>
+ <operation>
+tmp[63:0] := a[63:0] OR b[63:0]
+IF tmp[63:0] == 0xFFFFFFFFFFFFFFFF
+ dst := 1
+ELSE
+ dst := 0
+FI
+ </operation>
+ <instruction name="KORTESTQ" form="k, k" xed="KORTESTQ_MASKmskw_MASKmskw_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_ktest_mask32_u8">
+ <type>Mask</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Mask</category>
+ <return type="unsigned char" varname="dst" etype="UI8"/>
+ <parameter type="__mmask32" varname="a" etype="MASK"/>
+ <parameter type="__mmask32" varname="b" etype="MASK"/>
+ <parameter type="unsigned char*" varname="and_not" etype="UI8" memwidth="8"/>
+ <description>Compute the bitwise AND of 32-bit masks "a" and "b", and if the result is all zeros, store 1 in "dst", otherwise store 0 in "dst". Compute the bitwise NOT of "a" and then AND with "b", if the result is all zeros, store 1 in "and_not", otherwise store 0 in "and_not".</description>
+ <operation>
+tmp1[31:0] := a[31:0] AND b[31:0]
+IF tmp1[31:0] == 0x0
+ dst := 1
+ELSE
+ dst := 0
+FI
+tmp2[31:0] := (NOT a[31:0]) AND b[31:0]
+IF tmp2[31:0] == 0x0
+ MEM[and_not+7:and_not] := 1
+ELSE
+ MEM[and_not+7:and_not] := 0
+FI
+ </operation>
+ <instruction name="KTESTD" form="k, k" xed="KTESTD_MASKmskw_MASKmskw_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_ktestz_mask32_u8">
+ <type>Mask</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Mask</category>
+ <return type="unsigned char" varname="dst" etype="UI8"/>
+ <parameter type="__mmask32" varname="a" etype="MASK"/>
+ <parameter type="__mmask32" varname="b" etype="MASK"/>
+ <description>Compute the bitwise AND of 32-bit masks "a" and "b", and if the result is all zeros, store 1 in "dst", otherwise store 0 in "dst".</description>
+ <operation>
+tmp[31:0] := a[31:0] AND b[31:0]
+IF tmp[31:0] == 0x0
+ dst := 1
+ELSE
+ dst := 0
+FI
+ </operation>
+ <instruction name="KTESTD" form="k, k" xed="KTESTD_MASKmskw_MASKmskw_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_ktestc_mask32_u8">
+ <type>Mask</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Mask</category>
+ <return type="unsigned char" varname="dst" etype="UI8"/>
+ <parameter type="__mmask32" varname="a" etype="MASK"/>
+ <parameter type="__mmask32" varname="b" etype="MASK"/>
+ <description>Compute the bitwise NOT of 32-bit mask "a" and then AND with "b", if the result is all zeroes, store 1 in "dst", otherwise store 0 in "dst".</description>
+ <operation>
+tmp[31:0] := (NOT a[31:0]) AND b[31:0]
+IF tmp[31:0] == 0x0
+ dst := 1
+ELSE
+ dst := 0
+FI
+ </operation>
+ <instruction name="KTESTD" form="k, k" xed="KTESTD_MASKmskw_MASKmskw_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_ktest_mask64_u8">
+ <type>Mask</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Mask</category>
+ <return type="unsigned char" varname="dst" etype="UI8"/>
+ <parameter type="__mmask64" varname="a" etype="MASK"/>
+ <parameter type="__mmask64" varname="b" etype="MASK"/>
+ <parameter type="unsigned char*" varname="and_not" etype="UI8" memwidth="8"/>
+ <description>Compute the bitwise AND of 64-bit masks "a" and "b", and if the result is all zeros, store 1 in "dst", otherwise store 0 in "dst". Compute the bitwise NOT of "a" and then AND with "b", if the result is all zeros, store 1 in "and_not", otherwise store 0 in "and_not".</description>
+ <operation>
+tmp1[63:0] := a[63:0] AND b[63:0]
+IF tmp1[63:0] == 0x0
+ dst := 1
+ELSE
+ dst := 0
+FI
+tmp2[63:0] := (NOT a[63:0]) AND b[63:0]
+IF tmp2[63:0] == 0x0
+ MEM[and_not+7:and_not] := 1
+ELSE
+ MEM[and_not+7:and_not] := 0
+FI
+ </operation>
+ <instruction name="KTESTQ" form="k, k" xed="KTESTQ_MASKmskw_MASKmskw_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_ktestz_mask64_u8">
+ <type>Mask</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Mask</category>
+ <return type="unsigned char" varname="dst" etype="UI8"/>
+ <parameter type="__mmask64" varname="a" etype="MASK"/>
+ <parameter type="__mmask64" varname="b" etype="MASK"/>
+ <description>Compute the bitwise AND of 64-bit masks "a" and "b", and if the result is all zeros, store 1 in "dst", otherwise store 0 in "dst".</description>
+ <operation>
+tmp[63:0] := a[63:0] AND b[63:0]
+IF tmp[63:0] == 0x0
+ dst := 1
+ELSE
+ dst := 0
+FI
+ </operation>
+ <instruction name="KTESTQ" form="k, k" xed="KTESTQ_MASKmskw_MASKmskw_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_ktestc_mask64_u8">
+ <type>Mask</type>
+ <CPUID>AVX512BW</CPUID>
+ <category>Mask</category>
+ <return type="unsigned char" varname="dst" etype="UI8"/>
+ <parameter type="__mmask64" varname="a" etype="MASK"/>
+ <parameter type="__mmask64" varname="b" etype="MASK"/>
+ <description>Compute the bitwise NOT of 64-bit mask "a" and then AND with "b", if the result is all zeroes, store 1 in "dst", otherwise store 0 in "dst".</description>
+ <operation>
+tmp[63:0] := (NOT a[63:0]) AND b[63:0]
+IF tmp[63:0] == 0x0
+ dst := 1
+ELSE
+ dst := 0
+FI
+ </operation>
+ <instruction name="KTESTQ" form="k, k" xed="KTESTQ_MASKmskw_MASKmskw_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_cvtmask32_u32">
+ <CPUID>AVX512BW</CPUID>
+ <category>Mask</category>
+ <return type="unsigned int" varname="dst" etype="UI32"/>
+ <parameter type="__mmask32" varname="a" etype="MASK"/>
+ <description>Convert 32-bit mask "a" into an integer value, and store the result in "dst".</description>
+ <operation>
+dst := ZeroExtend32(a[31:0])
+ </operation>
+ <instruction name="KMOVD" form="r32, k" xed="KMOVD_GPR32u32_MASKmskw_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_cvtmask64_u64">
+ <CPUID>AVX512BW</CPUID>
+ <category>Mask</category>
+ <return type="unsigned __int64" varname="dst" etype="UI64"/>
+ <parameter type="__mmask64" varname="a" etype="MASK"/>
+ <description>Convert 64-bit mask "a" into an integer value, and store the result in "dst".</description>
+ <operation>
+dst := ZeroExtend64(a[63:0])
+ </operation>
+ <instruction name="KMOVQ" form="r64, k" xed="KMOVQ_GPR64u64_MASKmskw_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_cvtu32_mask32">
+ <CPUID>AVX512BW</CPUID>
+ <category>Mask</category>
+ <return type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="unsigned int" varname="a" etype="UI32"/>
+  <description>Convert integer value "a" into a 32-bit mask, and store the result in "k".</description>
+ <operation>
+k := ZeroExtend32(a[31:0])
+ </operation>
+ <instruction name="KMOVD" form="k, r32" xed="KMOVD_MASKmskw_GPR32u32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_cvtu64_mask64">
+ <CPUID>AVX512BW</CPUID>
+ <category>Mask</category>
+ <return type="__mmask64" varname="k" etype="MASK"/>
+ <parameter type="unsigned __int64" varname="a" etype="UI64"/>
+  <description>Convert integer value "a" into a 64-bit mask, and store the result in "k".</description>
+ <operation>
+k := ZeroExtend64(a[63:0])
+ </operation>
+ <instruction name="KMOVQ" form="k, r64" xed="KMOVQ_MASKmskw_GPR64u64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_broadcastmb_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512CD</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <description>Broadcast the low 8-bits from input mask "k" to all 64-bit elements of "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ dst[i+63:i] := ZeroExtend64(k[7:0])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPBROADCASTMB2Q" form="ymm" xed="VPBROADCASTMB2Q_YMMu64_MASKu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_broadcastmb_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512CD</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <description>Broadcast the low 8-bits from input mask "k" to all 64-bit elements of "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ dst[i+63:i] := ZeroExtend64(k[7:0])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPBROADCASTMB2Q" form="xmm" xed="VPBROADCASTMB2Q_XMMu64_MASKu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_broadcastmw_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512CD</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <description>Broadcast the low 16-bits from input mask "k" to all 32-bit elements of "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ dst[i+31:i] := ZeroExtend32(k[15:0])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPBROADCASTMW2D" form="ymm" xed="VPBROADCASTMW2D_YMMu32_MASKu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_broadcastmw_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512CD</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <description>Broadcast the low 16-bits from input mask "k" to all 32-bit elements of "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ dst[i+31:i] := ZeroExtend32(k[15:0])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPBROADCASTMW2D" form="xmm" xed="VPBROADCASTMW2D_XMMu32_MASKu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_conflict_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512CD</CPUID>
+ <category>Compare</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <description>Test each 32-bit element of "a" for equality with all other elements in "a" closer to the least significant bit. Each element's comparison forms a zero extended bit vector in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ FOR k := 0 to j-1
+ m := k*32
+ dst[i+k] := (a[i+31:i] == a[m+31:m]) ? 1 : 0
+ ENDFOR
+ dst[i+31:i+j] := 0
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPCONFLICTD" form="ymm, ymm" xed="VPCONFLICTD_YMMu32_MASKmskw_YMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_conflict_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512CD</CPUID>
+ <category>Compare</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <description>Test each 32-bit element of "a" for equality with all other elements in "a" closer to the least significant bit using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). Each element's comparison forms a zero extended bit vector in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ FOR l := 0 to j-1
+ m := l*32
+ dst[i+l] := (a[i+31:i] == a[m+31:m]) ? 1 : 0
+ ENDFOR
+ dst[i+31:i+j] := 0
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPCONFLICTD" form="ymm {k}, ymm" xed="VPCONFLICTD_YMMu32_MASKmskw_YMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_conflict_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512CD</CPUID>
+ <category>Compare</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <description>Test each 32-bit element of "a" for equality with all other elements in "a" closer to the least significant bit using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). Each element's comparison forms a zero extended bit vector in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ FOR l := 0 to j-1
+ m := l*32
+ dst[i+l] := (a[i+31:i] == a[m+31:m]) ? 1 : 0
+ ENDFOR
+ dst[i+31:i+j] := 0
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPCONFLICTD" form="ymm {z}, ymm" xed="VPCONFLICTD_YMMu32_MASKmskw_YMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_conflict_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512CD</CPUID>
+ <category>Compare</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <description>Test each 32-bit element of "a" for equality with all other elements in "a" closer to the least significant bit. Each element's comparison forms a zero extended bit vector in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ FOR k := 0 to j-1
+ m := k*32
+ dst[i+k] := (a[i+31:i] == a[m+31:m]) ? 1 : 0
+ ENDFOR
+ dst[i+31:i+j] := 0
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPCONFLICTD" form="xmm, xmm" xed="VPCONFLICTD_XMMu32_MASKmskw_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_conflict_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512CD</CPUID>
+ <category>Compare</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <description>Test each 32-bit element of "a" for equality with all other elements in "a" closer to the least significant bit using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). Each element's comparison forms a zero extended bit vector in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ FOR l := 0 to j-1
+ m := l*32
+ dst[i+l] := (a[i+31:i] == a[m+31:m]) ? 1 : 0
+ ENDFOR
+ dst[i+31:i+j] := 0
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPCONFLICTD" form="xmm {k}, xmm" xed="VPCONFLICTD_XMMu32_MASKmskw_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_conflict_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512CD</CPUID>
+ <category>Compare</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <description>Test each 32-bit element of "a" for equality with all other elements in "a" closer to the least significant bit using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). Each element's comparison forms a zero extended bit vector in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ FOR l := 0 to j-1
+ m := l*32
+ dst[i+l] := (a[i+31:i] == a[m+31:m]) ? 1 : 0
+ ENDFOR
+ dst[i+31:i+j] := 0
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPCONFLICTD" form="xmm {z}, xmm" xed="VPCONFLICTD_XMMu32_MASKmskw_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_conflict_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512CD</CPUID>
+ <category>Compare</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <description>Test each 64-bit element of "a" for equality with all other elements in "a" closer to the least significant bit. Each element's comparison forms a zero extended bit vector in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ FOR k := 0 to j-1
+ m := k*64
+ dst[i+k] := (a[i+63:i] == a[m+63:m]) ? 1 : 0
+ ENDFOR
+ dst[i+63:i+j] := 0
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPCONFLICTQ" form="ymm, ymm" xed="VPCONFLICTQ_YMMu64_MASKmskw_YMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_conflict_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512CD</CPUID>
+ <category>Compare</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m256i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <description>Test each 64-bit element of "a" for equality with all other elements in "a" closer to the least significant bit using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). Each element's comparison forms a zero extended bit vector in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ FOR l := 0 to j-1
+ m := l*64
+ dst[i+l] := (a[i+63:i] == a[m+63:m]) ? 1 : 0
+ ENDFOR
+ dst[i+63:i+j] := 0
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPCONFLICTQ" form="ymm {k}, ymm" xed="VPCONFLICTQ_YMMu64_MASKmskw_YMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_conflict_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512CD</CPUID>
+ <category>Compare</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <description>Test each 64-bit element of "a" for equality with all other elements in "a" closer to the least significant bit using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). Each element's comparison forms a zero extended bit vector in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ FOR l := 0 to j-1
+ m := l*64
+ dst[i+l] := (a[i+63:i] == a[m+63:m]) ? 1 : 0
+ ENDFOR
+ dst[i+63:i+j] := 0
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPCONFLICTQ" form="ymm {z}, ymm" xed="VPCONFLICTQ_YMMu64_MASKmskw_YMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_conflict_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512CD</CPUID>
+ <category>Compare</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <description>Test each 64-bit element of "a" for equality with all other elements in "a" closer to the least significant bit. Each element's comparison forms a zero extended bit vector in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ FOR k := 0 to j-1
+ m := k*64
+ dst[i+k] := (a[i+63:i] == a[m+63:m]) ? 1 : 0
+ ENDFOR
+ dst[i+63:i+j] := 0
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPCONFLICTQ" form="xmm, xmm" xed="VPCONFLICTQ_XMMu64_MASKmskw_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_conflict_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512CD</CPUID>
+ <category>Compare</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m128i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <description>Test each 64-bit element of "a" for equality with all other elements in "a" closer to the least significant bit using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). Each element's comparison forms a zero extended bit vector in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ FOR l := 0 to j-1
+ m := l*64
+ dst[i+l] := (a[i+63:i] == a[m+63:m]) ? 1 : 0
+ ENDFOR
+ dst[i+63:i+j] := 0
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPCONFLICTQ" form="xmm {k}, xmm" xed="VPCONFLICTQ_XMMu64_MASKmskw_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_conflict_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512CD</CPUID>
+ <category>Compare</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <description>Test each 64-bit element of "a" for equality with all other elements in "a" closer to the least significant bit using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). Each element's comparison forms a zero extended bit vector in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ FOR l := 0 to j-1
+ m := l*64
+ dst[i+l] := (a[i+63:i] == a[m+63:m]) ? 1 : 0
+ ENDFOR
+ dst[i+63:i+j] := 0
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPCONFLICTQ" form="xmm {z}, xmm" xed="VPCONFLICTQ_XMMu64_MASKmskw_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_lzcnt_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512CD</CPUID>
+ <category>Bit Manipulation</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <description>Counts the number of leading zero bits in each packed 32-bit integer in "a", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ tmp := 31
+ dst[i+31:i] := 0
+ DO WHILE (tmp &gt;= 0 AND a[i+tmp] == 0)
+ tmp := tmp - 1
+ dst[i+31:i] := dst[i+31:i] + 1
+ OD
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPLZCNTD" form="ymm, ymm" xed="VPLZCNTD_YMMu32_MASKmskw_YMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_lzcnt_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512CD</CPUID>
+ <category>Bit Manipulation</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <description>Counts the number of leading zero bits in each packed 32-bit integer in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ tmp := 31
+ dst[i+31:i] := 0
+ DO WHILE (tmp &gt;= 0 AND a[i+tmp] == 0)
+ tmp := tmp - 1
+ dst[i+31:i] := dst[i+31:i] + 1
+ OD
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPLZCNTD" form="ymm {k}, ymm" xed="VPLZCNTD_YMMu32_MASKmskw_YMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_lzcnt_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512CD</CPUID>
+ <category>Bit Manipulation</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <description>Counts the number of leading zero bits in each packed 32-bit integer in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ tmp := 31
+ dst[i+31:i] := 0
+ DO WHILE (tmp &gt;= 0 AND a[i+tmp] == 0)
+ tmp := tmp - 1
+ dst[i+31:i] := dst[i+31:i] + 1
+ OD
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPLZCNTD" form="ymm {z}, ymm" xed="VPLZCNTD_YMMu32_MASKmskw_YMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_lzcnt_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512CD</CPUID>
+ <category>Bit Manipulation</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <description>Counts the number of leading zero bits in each packed 32-bit integer in "a", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ tmp := 31
+ dst[i+31:i] := 0
+ DO WHILE (tmp &gt;= 0 AND a[i+tmp] == 0)
+ tmp := tmp - 1
+ dst[i+31:i] := dst[i+31:i] + 1
+ OD
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPLZCNTD" form="xmm, xmm" xed="VPLZCNTD_XMMu32_MASKmskw_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_lzcnt_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512CD</CPUID>
+ <category>Bit Manipulation</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <description>Counts the number of leading zero bits in each packed 32-bit integer in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ tmp := 31
+ dst[i+31:i] := 0
+ DO WHILE (tmp &gt;= 0 AND a[i+tmp] == 0)
+ tmp := tmp - 1
+ dst[i+31:i] := dst[i+31:i] + 1
+ OD
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPLZCNTD" form="xmm {k}, xmm" xed="VPLZCNTD_XMMu32_MASKmskw_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_lzcnt_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512CD</CPUID>
+ <category>Bit Manipulation</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <description>Counts the number of leading zero bits in each packed 32-bit integer in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ tmp := 31
+ dst[i+31:i] := 0
+ DO WHILE (tmp &gt;= 0 AND a[i+tmp] == 0)
+ tmp := tmp - 1
+ dst[i+31:i] := dst[i+31:i] + 1
+ OD
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPLZCNTD" form="xmm {z}, xmm" xed="VPLZCNTD_XMMu32_MASKmskw_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_lzcnt_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512CD</CPUID>
+ <category>Bit Manipulation</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <description>Counts the number of leading zero bits in each packed 64-bit integer in "a", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ tmp := 63
+ dst[i+63:i] := 0
+ DO WHILE (tmp &gt;= 0 AND a[i+tmp] == 0)
+ tmp := tmp - 1
+ dst[i+63:i] := dst[i+63:i] + 1
+ OD
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPLZCNTQ" form="ymm, ymm" xed="VPLZCNTQ_YMMu64_MASKmskw_YMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_lzcnt_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512CD</CPUID>
+ <category>Bit Manipulation</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m256i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <description>Counts the number of leading zero bits in each packed 64-bit integer in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ tmp := 63
+ dst[i+63:i] := 0
+ DO WHILE (tmp &gt;= 0 AND a[i+tmp] == 0)
+ tmp := tmp - 1
+ dst[i+63:i] := dst[i+63:i] + 1
+ OD
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPLZCNTQ" form="ymm {k}, ymm" xed="VPLZCNTQ_YMMu64_MASKmskw_YMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_lzcnt_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512CD</CPUID>
+ <category>Bit Manipulation</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <description>Counts the number of leading zero bits in each packed 64-bit integer in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ tmp := 63
+ dst[i+63:i] := 0
+ DO WHILE (tmp &gt;= 0 AND a[i+tmp] == 0)
+ tmp := tmp - 1
+ dst[i+63:i] := dst[i+63:i] + 1
+ OD
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPLZCNTQ" form="ymm {z}, ymm" xed="VPLZCNTQ_YMMu64_MASKmskw_YMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_lzcnt_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512CD</CPUID>
+ <category>Bit Manipulation</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <description>Counts the number of leading zero bits in each packed 64-bit integer in "a", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ tmp := 63
+ dst[i+63:i] := 0
+ DO WHILE (tmp &gt;= 0 AND a[i+tmp] == 0)
+ tmp := tmp - 1
+ dst[i+63:i] := dst[i+63:i] + 1
+ OD
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPLZCNTQ" form="xmm, xmm" xed="VPLZCNTQ_XMMu64_MASKmskw_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_lzcnt_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512CD</CPUID>
+ <category>Bit Manipulation</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m128i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <description>Counts the number of leading zero bits in each packed 64-bit integer in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ tmp := 63
+ dst[i+63:i] := 0
+ DO WHILE (tmp &gt;= 0 AND a[i+tmp] == 0)
+ tmp := tmp - 1
+ dst[i+63:i] := dst[i+63:i] + 1
+ OD
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPLZCNTQ" form="xmm {k}, xmm" xed="VPLZCNTQ_XMMu64_MASKmskw_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_lzcnt_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512CD</CPUID>
+ <category>Bit Manipulation</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <description>Counts the number of leading zero bits in each packed 64-bit integer in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ tmp := 63
+ dst[i+63:i] := 0
+ DO WHILE (tmp &gt;= 0 AND a[i+tmp] == 0)
+ tmp := tmp - 1
+ dst[i+63:i] := dst[i+63:i] + 1
+ OD
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPLZCNTQ" form="xmm {z}, xmm" xed="VPLZCNTQ_XMMu64_MASKmskw_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_broadcastmb_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512CD</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <description>Broadcast the low 8-bits from input mask "k" to all 64-bit elements of "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ dst[i+63:i] := ZeroExtend64(k[7:0])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPBROADCASTMB2Q" form="zmm" xed="VPBROADCASTMB2Q_ZMMu64_MASKu64_AVX512CD"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_broadcastmw_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512CD</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <description>Broadcast the low 16-bits from input mask "k" to all 32-bit elements of "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := ZeroExtend32(k[15:0])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPBROADCASTMW2D" form="zmm" xed="VPBROADCASTMW2D_ZMMu32_MASKu32_AVX512CD"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_conflict_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512CD</CPUID>
+ <category>Compare</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <description>Test each 32-bit element of "a" for equality with all other elements in "a" closer to the least significant bit. Each element's comparison forms a zero extended bit vector in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ FOR k := 0 to j-1
+ m := k*32
+ dst[i+k] := (a[i+31:i] == a[m+31:m]) ? 1 : 0
+ ENDFOR
+ dst[i+31:i+j] := 0
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPCONFLICTD" form="zmm, zmm" xed="VPCONFLICTD_ZMMu32_MASKmskw_ZMMu32_AVX512CD"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_conflict_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512CD</CPUID>
+ <category>Compare</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="src" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <description>Test each 32-bit element of "a" for equality with all other elements in "a" closer to the least significant bit using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). Each element's comparison forms a zero extended bit vector in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ FOR l := 0 to j-1
+ m := l*32
+ dst[i+l] := (a[i+31:i] == a[m+31:m]) ? 1 : 0
+ ENDFOR
+ dst[i+31:i+j] := 0
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPCONFLICTD" form="zmm {k}, zmm" xed="VPCONFLICTD_ZMMu32_MASKmskw_ZMMu32_AVX512CD"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_conflict_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512CD</CPUID>
+ <category>Compare</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <description>Test each 32-bit element of "a" for equality with all other elements in "a" closer to the least significant bit using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). Each element's comparison forms a zero extended bit vector in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ FOR l := 0 to j-1
+ m := l*32
+ dst[i+l] := (a[i+31:i] == a[m+31:m]) ? 1 : 0
+ ENDFOR
+ dst[i+31:i+j] := 0
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPCONFLICTD" form="zmm {z}, zmm" xed="VPCONFLICTD_ZMMu32_MASKmskw_ZMMu32_AVX512CD"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_conflict_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512CD</CPUID>
+ <category>Compare</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <description>Test each 64-bit element of "a" for equality with all other elements in "a" closer to the least significant bit. Each element's comparison forms a zero extended bit vector in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ FOR k := 0 to j-1
+ m := k*64
+ dst[i+k] := (a[i+63:i] == a[m+63:m]) ? 1 : 0
+ ENDFOR
+ dst[i+63:i+j] := 0
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPCONFLICTQ" form="zmm, zmm" xed="VPCONFLICTQ_ZMMu64_MASKmskw_ZMMu64_AVX512CD"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_conflict_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512CD</CPUID>
+ <category>Compare</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <description>Test each 64-bit element of "a" for equality with all other elements in "a" closer to the least significant bit using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). Each element's comparison forms a zero extended bit vector in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ FOR l := 0 to j-1
+ m := l*64
+ dst[i+l] := (a[i+63:i] == a[m+63:m]) ? 1 : 0
+ ENDFOR
+ dst[i+63:i+j] := 0
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPCONFLICTQ" form="zmm {k}, zmm" xed="VPCONFLICTQ_ZMMu64_MASKmskw_ZMMu64_AVX512CD"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_conflict_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512CD</CPUID>
+ <category>Compare</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <description>Test each 64-bit element of "a" for equality with all other elements in "a" closer to the least significant bit using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). Each element's comparison forms a zero extended bit vector in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ FOR l := 0 to j-1
+ m := l*64
+ dst[i+l] := (a[i+63:i] == a[m+63:m]) ? 1 : 0
+ ENDFOR
+ dst[i+63:i+j] := 0
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPCONFLICTQ" form="zmm {z}, zmm" xed="VPCONFLICTQ_ZMMu64_MASKmskw_ZMMu64_AVX512CD"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_lzcnt_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512CD</CPUID>
+ <category>Bit Manipulation</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <description>Counts the number of leading zero bits in each packed 32-bit integer in "a", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ tmp := 31
+ dst[i+31:i] := 0
+ DO WHILE (tmp &gt;= 0 AND a[i+tmp] == 0)
+ tmp := tmp - 1
+ dst[i+31:i] := dst[i+31:i] + 1
+ OD
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPLZCNTD" form="zmm, zmm" xed="VPLZCNTD_ZMMu32_MASKmskw_ZMMu32_AVX512CD"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_lzcnt_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512CD</CPUID>
+ <category>Bit Manipulation</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="src" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <description>Counts the number of leading zero bits in each packed 32-bit integer in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ tmp := 31
+ dst[i+31:i] := 0
+ DO WHILE (tmp &gt;= 0 AND a[i+tmp] == 0)
+ tmp := tmp - 1
+ dst[i+31:i] := dst[i+31:i] + 1
+ OD
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPLZCNTD" form="zmm {k}, zmm" xed="VPLZCNTD_ZMMu32_MASKmskw_ZMMu32_AVX512CD"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_lzcnt_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512CD</CPUID>
+ <category>Bit Manipulation</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <description>Counts the number of leading zero bits in each packed 32-bit integer in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ tmp := 31
+ dst[i+31:i] := 0
+ DO WHILE (tmp &gt;= 0 AND a[i+tmp] == 0)
+ tmp := tmp - 1
+ dst[i+31:i] := dst[i+31:i] + 1
+ OD
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPLZCNTD" form="zmm {z}, zmm" xed="VPLZCNTD_ZMMu32_MASKmskw_ZMMu32_AVX512CD"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_lzcnt_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512CD</CPUID>
+ <category>Bit Manipulation</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <description>Counts the number of leading zero bits in each packed 64-bit integer in "a", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ tmp := 63
+ dst[i+63:i] := 0
+ DO WHILE (tmp &gt;= 0 AND a[i+tmp] == 0)
+ tmp := tmp - 1
+ dst[i+63:i] := dst[i+63:i] + 1
+ OD
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPLZCNTQ" form="zmm, zmm" xed="VPLZCNTQ_ZMMu64_MASKmskw_ZMMu64_AVX512CD"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_lzcnt_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512CD</CPUID>
+ <category>Bit Manipulation</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <description>Counts the number of leading zero bits in each packed 64-bit integer in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ tmp := 63
+ dst[i+63:i] := 0
+ DO WHILE (tmp &gt;= 0 AND a[i+tmp] == 0)
+ tmp := tmp - 1
+ dst[i+63:i] := dst[i+63:i] + 1
+ OD
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPLZCNTQ" form="zmm {k}, zmm" xed="VPLZCNTQ_ZMMu64_MASKmskw_ZMMu64_AVX512CD"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_lzcnt_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512CD</CPUID>
+ <category>Bit Manipulation</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <description>Counts the number of leading zero bits in each packed 64-bit integer in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ tmp := 63
+ dst[i+63:i] := 0
+ DO WHILE (tmp &gt;= 0 AND a[i+tmp] == 0)
+ tmp := tmp - 1
+ dst[i+63:i] := dst[i+63:i] + 1
+ OD
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPLZCNTQ" form="zmm {z}, zmm" xed="VPLZCNTQ_ZMMu64_MASKmskw_ZMMu64_AVX512CD"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_andnot_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Logical</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <parameter type="__m256d" varname="b" etype="FP64"/>
+ <description>Compute the bitwise NOT of packed double-precision (64-bit) floating-point elements in "a" and then AND with "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := ((NOT a[i+63:i]) AND b[i+63:i])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VANDNPD" form="ymm {k}, ymm, ymm" xed="VANDNPD_YMMu64_MASKmskw_YMMu64_YMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_andnot_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Logical</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <parameter type="__m256d" varname="b" etype="FP64"/>
+ <description>Compute the bitwise NOT of packed double-precision (64-bit) floating-point elements in "a" and then AND with "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := ((NOT a[i+63:i]) AND b[i+63:i])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VANDNPD" form="ymm {z}, ymm, ymm" xed="VANDNPD_YMMu64_MASKmskw_YMMu64_YMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_andnot_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Logical</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <description>Compute the bitwise NOT of packed double-precision (64-bit) floating-point elements in "a" and then AND with "b", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ dst[i+63:i] := ((NOT a[i+63:i]) AND b[i+63:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VANDNPD" form="zmm, zmm, zmm" xed="VANDNPD_ZMMu64_MASKmskw_ZMMu64_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_andnot_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Logical</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <description>Compute the bitwise NOT of packed double-precision (64-bit) floating-point elements in "a" and then AND with "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := ((NOT a[i+63:i]) AND b[i+63:i])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VANDNPD" form="zmm {k}, zmm, zmm" xed="VANDNPD_ZMMu64_MASKmskw_ZMMu64_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_andnot_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Logical</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <description>Compute the bitwise NOT of packed double-precision (64-bit) floating-point elements in "a" and then AND with "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := ((NOT a[i+63:i]) AND b[i+63:i])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VANDNPD" form="zmm {z}, zmm, zmm" xed="VANDNPD_ZMMu64_MASKmskw_ZMMu64_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_andnot_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Logical</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Compute the bitwise NOT of packed double-precision (64-bit) floating-point elements in "a" and then AND with "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := ((NOT a[i+63:i]) AND b[i+63:i])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VANDNPD" form="xmm {k}, xmm, xmm" xed="VANDNPD_XMMu64_MASKmskw_XMMu64_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_andnot_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Logical</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Compute the bitwise NOT of packed double-precision (64-bit) floating-point elements in "a" and then AND with "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := ((NOT a[i+63:i]) AND b[i+63:i])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VANDNPD" form="xmm {z}, xmm, xmm" xed="VANDNPD_XMMu64_MASKmskw_XMMu64_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_andnot_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Logical</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="__m256" varname="b" etype="FP32"/>
+ <description>Compute the bitwise NOT of packed single-precision (32-bit) floating-point elements in "a" and then AND with "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := ((NOT a[i+31:i]) AND b[i+31:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VANDNPS" form="ymm {k}, ymm, ymm" xed="VANDNPS_YMMu32_MASKmskw_YMMu32_YMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_andnot_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Logical</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="__m256" varname="b" etype="FP32"/>
+ <description>Compute the bitwise NOT of packed single-precision (32-bit) floating-point elements in "a" and then AND with "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := ((NOT a[i+31:i]) AND b[i+31:i])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VANDNPS" form="ymm {z}, ymm, ymm" xed="VANDNPS_YMMu32_MASKmskw_YMMu32_YMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_andnot_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Logical</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <description>Compute the bitwise NOT of packed single-precision (32-bit) floating-point elements in "a" and then AND with "b", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := ((NOT a[i+31:i]) AND b[i+31:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VANDNPS" form="zmm, zmm, zmm" xed="VANDNPS_ZMMu32_MASKmskw_ZMMu32_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_andnot_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Logical</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <description>Compute the bitwise NOT of packed single-precision (32-bit) floating-point elements in "a" and then AND with "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := ((NOT a[i+31:i]) AND b[i+31:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VANDNPS" form="zmm {k}, zmm, zmm" xed="VANDNPS_ZMMu32_MASKmskw_ZMMu32_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_andnot_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Logical</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <description>Compute the bitwise NOT of packed single-precision (32-bit) floating-point elements in "a" and then AND with "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := ((NOT a[i+31:i]) AND b[i+31:i])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VANDNPS" form="zmm {z}, zmm, zmm" xed="VANDNPS_ZMMu32_MASKmskw_ZMMu32_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_andnot_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Logical</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Compute the bitwise NOT of packed single-precision (32-bit) floating-point elements in "a" and then AND with "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := ((NOT a[i+31:i]) AND b[i+31:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VANDNPS" form="xmm {k}, xmm, xmm" xed="VANDNPS_XMMu32_MASKmskw_XMMu32_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_andnot_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Logical</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Compute the bitwise NOT of packed single-precision (32-bit) floating-point elements in "a" and then AND with "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := ((NOT a[i+31:i]) AND b[i+31:i])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VANDNPS" form="xmm {z}, xmm, xmm" xed="VANDNPS_XMMu32_MASKmskw_XMMu32_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_and_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Logical</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <parameter type="__m256d" varname="b" etype="FP64"/>
+ <description>Compute the bitwise AND of packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := (a[i+63:i] AND b[i+63:i])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VANDPD" form="ymm {k}, ymm, ymm" xed="VANDPD_YMMu64_MASKmskw_YMMu64_YMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_and_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Logical</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <parameter type="__m256d" varname="b" etype="FP64"/>
+ <description>Compute the bitwise AND of packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := (a[i+63:i] AND b[i+63:i])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VANDPD" form="ymm {z}, ymm, ymm" xed="VANDPD_YMMu64_MASKmskw_YMMu64_YMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_and_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Logical</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <description>Compute the bitwise AND of packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ dst[i+63:i] := (a[i+63:i] AND b[i+63:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VANDPD" form="zmm, zmm, zmm" xed="VANDPD_ZMMu64_MASKmskw_ZMMu64_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_and_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Logical</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <description>Compute the bitwise AND of packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := (a[i+63:i] AND b[i+63:i])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VANDPD" form="zmm {k}, zmm, zmm" xed="VANDPD_ZMMu64_MASKmskw_ZMMu64_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_and_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Logical</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <description>Compute the bitwise AND of packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := (a[i+63:i] AND b[i+63:i])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VANDPD" form="zmm {z}, zmm, zmm" xed="VANDPD_ZMMu64_MASKmskw_ZMMu64_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_and_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Logical</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Compute the bitwise AND of packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := (a[i+63:i] AND b[i+63:i])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VANDPD" form="xmm {k}, xmm, xmm" xed="VANDPD_XMMu64_MASKmskw_XMMu64_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_and_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Logical</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Compute the bitwise AND of packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := (a[i+63:i] AND b[i+63:i])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VANDPD" form="xmm {z}, xmm, xmm" xed="VANDPD_XMMu64_MASKmskw_XMMu64_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_and_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Logical</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="__m256" varname="b" etype="FP32"/>
+ <description>Compute the bitwise AND of packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := (a[i+31:i] AND b[i+31:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VANDPS" form="ymm {k}, ymm, ymm" xed="VANDPS_YMMu32_MASKmskw_YMMu32_YMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_and_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Logical</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="__m256" varname="b" etype="FP32"/>
+ <description>Compute the bitwise AND of packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := (a[i+31:i] AND b[i+31:i])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VANDPS" form="ymm {z}, ymm, ymm" xed="VANDPS_YMMu32_MASKmskw_YMMu32_YMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_and_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Logical</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <description>Compute the bitwise AND of packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := (a[i+31:i] AND b[i+31:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VANDPS" form="zmm, zmm, zmm" xed="VANDPS_ZMMu32_MASKmskw_ZMMu32_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_and_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Logical</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <description>Compute the bitwise AND of packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := (a[i+31:i] AND b[i+31:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VANDPS" form="zmm {k}, zmm, zmm" xed="VANDPS_ZMMu32_MASKmskw_ZMMu32_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_and_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Logical</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <description>Compute the bitwise AND of packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := (a[i+31:i] AND b[i+31:i])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VANDPS" form="zmm {z}, zmm, zmm" xed="VANDPS_ZMMu32_MASKmskw_ZMMu32_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_and_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Logical</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Compute the bitwise AND of packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := (a[i+31:i] AND b[i+31:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VANDPS" form="xmm {k}, xmm, xmm" xed="VANDPS_XMMu32_MASKmskw_XMMu32_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_and_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Logical</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Compute the bitwise AND of packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := (a[i+31:i] AND b[i+31:i])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VANDPS" form="xmm {z}, xmm, xmm" xed="VANDPS_XMMu32_MASKmskw_XMMu32_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_broadcast_f32x2">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Broadcast the lower 2 packed single-precision (32-bit) floating-point elements from "a" to all elements of "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ n := (j % 2)*32
+ dst[i+31:i] := a[n+31:n]
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VBROADCASTF32X2" form="ymm, xmm" xed="VBROADCASTF32X2_YMMf32_MASKmskw_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_broadcast_f32x2">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Broadcast the lower 2 packed single-precision (32-bit) floating-point elements from "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ n := (j % 2)*32
+ IF k[j]
+ dst[i+31:i] := a[n+31:n]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VBROADCASTF32X2" form="ymm {k}, xmm" xed="VBROADCASTF32X2_YMMf32_MASKmskw_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_broadcast_f32x2">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Broadcast the lower 2 packed single-precision (32-bit) floating-point elements from "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ n := (j % 2)*32
+ IF k[j]
+ dst[i+31:i] := a[n+31:n]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VBROADCASTF32X2" form="ymm {z}, xmm" xed="VBROADCASTF32X2_YMMf32_MASKmskw_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_broadcast_f32x2">
+ <type>Floating Point</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Broadcast the lower 2 packed single-precision (32-bit) floating-point elements from "a" to all elements of "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ n := (j % 2)*32
+ dst[i+31:i] := a[n+31:n]
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VBROADCASTF32X2" form="zmm, xmm" xed="VBROADCASTF32X2_ZMMf32_MASKmskw_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_broadcast_f32x2">
+ <type>Floating Point</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Broadcast the lower 2 packed single-precision (32-bit) floating-point elements from "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ n := (j % 2)*32
+ IF k[j]
+ dst[i+31:i] := a[n+31:n]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VBROADCASTF32X2" form="zmm {k}, xmm" xed="VBROADCASTF32X2_ZMMf32_MASKmskw_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_broadcast_f32x2">
+ <type>Floating Point</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Broadcast the lower 2 packed single-precision (32-bit) floating-point elements from "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ n := (j % 2)*32
+ IF k[j]
+ dst[i+31:i] := a[n+31:n]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VBROADCASTF32X2" form="zmm {z}, xmm" xed="VBROADCASTF32X2_ZMMf32_MASKmskw_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" sequence="TRUE" name="_mm512_broadcast_f32x8">
+ <type>Floating Point</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <description>Broadcast the 8 packed single-precision (32-bit) floating-point elements from "a" to all elements of "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ n := (j % 8)*32
+ dst[i+31:i] := a[n+31:n]
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VBROADCASTF32X8" form="zmm, m256" xed="VBROADCASTF32X8_ZMMf32_MASKmskw_MEMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" sequence="TRUE" name="_mm512_mask_broadcast_f32x8">
+ <type>Floating Point</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <description>Broadcast the 8 packed single-precision (32-bit) floating-point elements from "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ n := (j % 8)*32
+ IF k[j]
+ dst[i+31:i] := a[n+31:n]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VBROADCASTF32X8" form="zmm {k}, m256" xed="VBROADCASTF32X8_ZMMf32_MASKmskw_MEMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" sequence="TRUE" name="_mm512_maskz_broadcast_f32x8">
+ <type>Floating Point</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <description>Broadcast the 8 packed single-precision (32-bit) floating-point elements from "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ n := (j % 8)*32
+ IF k[j]
+ dst[i+31:i] := a[n+31:n]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VBROADCASTF32X8" form="zmm {z}, m256" xed="VBROADCASTF32X8_ZMMf32_MASKmskw_MEMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" sequence="TRUE" name="_mm256_broadcast_f64x2">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Broadcast the 2 packed double-precision (64-bit) floating-point elements from "a" to all elements of "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ n := (j % 2)*64
+ dst[i+63:i] := a[n+63:n]
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VBROADCASTF64X2" form="ymm, m128" xed="VBROADCASTF64X2_YMMf64_MASKmskw_MEMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" sequence="TRUE" name="_mm256_mask_broadcast_f64x2">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Broadcast the 2 packed double-precision (64-bit) floating-point elements from "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ n := (j % 2)*64
+ IF k[j]
+ dst[i+63:i] := a[n+63:n]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VBROADCASTF64X2" form="ymm {k}, m128" xed="VBROADCASTF64X2_YMMf64_MASKmskw_MEMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" sequence="TRUE" name="_mm256_maskz_broadcast_f64x2">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Broadcast the 2 packed double-precision (64-bit) floating-point elements from "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ n := (j % 2)*64
+ IF k[j]
+ dst[i+63:i] := a[n+63:n]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VBROADCASTF64X2" form="ymm {z}, m128" xed="VBROADCASTF64X2_YMMf64_MASKmskw_MEMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" sequence="TRUE" name="_mm512_broadcast_f64x2">
+ <type>Floating Point</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Broadcast the 2 packed double-precision (64-bit) floating-point elements from "a" to all elements of "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ n := (j % 2)*64
+ dst[i+63:i] := a[n+63:n]
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VBROADCASTF64X2" form="zmm, m128" xed="VBROADCASTF64X2_ZMMf64_MASKmskw_MEMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" sequence="TRUE" name="_mm512_mask_broadcast_f64x2">
+ <type>Floating Point</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Broadcast the 2 packed double-precision (64-bit) floating-point elements from "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ n := (j % 2)*64
+ IF k[j]
+ dst[i+63:i] := a[n+63:n]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VBROADCASTF64X2" form="zmm {k}, m128" xed="VBROADCASTF64X2_ZMMf64_MASKmskw_MEMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" sequence="TRUE" name="_mm512_maskz_broadcast_f64x2">
+ <type>Floating Point</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Broadcast the 2 packed double-precision (64-bit) floating-point elements from "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ n := (j % 2)*64
+ IF k[j]
+ dst[i+63:i] := a[n+63:n]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VBROADCASTF64X2" form="zmm {z}, m128" xed="VBROADCASTF64X2_ZMMf64_MASKmskw_MEMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_broadcast_i32x2">
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <description>Broadcast the lower 2 packed 32-bit integers from "a" to all elements of "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ n := (j % 2)*32
+ dst[i+31:i] := a[n+31:n]
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VBROADCASTI32X2" form="ymm, xmm" xed="VBROADCASTI32X2_YMMu32_MASKmskw_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_broadcast_i32x2">
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <description>Broadcast the lower 2 packed 32-bit integers from "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ n := (j % 2)*32
+ IF k[j]
+ dst[i+31:i] := a[n+31:n]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VBROADCASTI32X2" form="ymm {k}, xmm" xed="VBROADCASTI32X2_YMMu32_MASKmskw_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_broadcast_i32x2">
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <description>Broadcast the lower 2 packed 32-bit integers from "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ n := (j % 2)*32
+ IF k[j]
+ dst[i+31:i] := a[n+31:n]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VBROADCASTI32X2" form="ymm {z}, xmm" xed="VBROADCASTI32X2_YMMu32_MASKmskw_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_broadcast_i32x2">
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <description>Broadcast the lower 2 packed 32-bit integers from "a" to all elements of "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ n := (j % 2)*32
+ dst[i+31:i] := a[n+31:n]
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VBROADCASTI32X2" form="zmm, xmm" xed="VBROADCASTI32X2_ZMMu32_MASKmskw_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_broadcast_i32x2">
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="src" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <description>Broadcast the lower 2 packed 32-bit integers from "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ n := (j % 2)*32
+ IF k[j]
+ dst[i+31:i] := a[n+31:n]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VBROADCASTI32X2" form="zmm {k}, xmm" xed="VBROADCASTI32X2_ZMMu32_MASKmskw_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_broadcast_i32x2">
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <description>Broadcast the lower 2 packed 32-bit integers from "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ n := (j % 2)*32
+ IF k[j]
+ dst[i+31:i] := a[n+31:n]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VBROADCASTI32X2" form="zmm {z}, xmm" xed="VBROADCASTI32X2_ZMMu32_MASKmskw_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_broadcast_i32x2">
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <description>Broadcast the lower 2 packed 32-bit integers from "a" to all elements of "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ n := (j % 2)*32
+ dst[i+31:i] := a[n+31:n]
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VBROADCASTI32X2" form="xmm, xmm" xed="VBROADCASTI32X2_XMMu32_MASKmskw_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_broadcast_i32x2">
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <description>Broadcast the lower 2 packed 32-bit integers from "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ n := (j % 2)*32
+ IF k[j]
+ dst[i+31:i] := a[n+31:n]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VBROADCASTI32X2" form="xmm {k}, xmm" xed="VBROADCASTI32X2_XMMu32_MASKmskw_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_broadcast_i32x2">
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <description>Broadcast the lower 2 packed 32-bit integers from "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ n := (j % 2)*32
+ IF k[j]
+ dst[i+31:i] := a[n+31:n]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VBROADCASTI32X2" form="xmm {z}, xmm" xed="VBROADCASTI32X2_XMMu32_MASKmskw_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" sequence="TRUE" name="_mm512_broadcast_i32x8">
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <description>Broadcast the 8 packed 32-bit integers from "a" to all elements of "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ n := (j % 8)*32
+ dst[i+31:i] := a[n+31:n]
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VBROADCASTI32X8" form="zmm, m256" xed="VBROADCASTI32X8_ZMMu32_MASKmskw_MEMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" sequence="TRUE" name="_mm512_mask_broadcast_i32x8">
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="src" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <description>Broadcast the 8 packed 32-bit integers from "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ n := (j % 8)*32
+ IF k[j]
+ dst[i+31:i] := a[n+31:n]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VBROADCASTI32X8" form="zmm {k}, m256" xed="VBROADCASTI32X8_ZMMu32_MASKmskw_MEMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" sequence="TRUE" name="_mm512_maskz_broadcast_i32x8">
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <description>Broadcast the 8 packed 32-bit integers from "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ n := (j % 8)*32
+ IF k[j]
+ dst[i+31:i] := a[n+31:n]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VBROADCASTI32X8" form="zmm {z}, m256" xed="VBROADCASTI32X8_ZMMu32_MASKmskw_MEMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" sequence="TRUE" name="_mm256_broadcast_i64x2">
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <description>Broadcast the 2 packed 64-bit integers from "a" to all elements of "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ n := (j % 2)*64
+ dst[i+63:i] := a[n+63:n]
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VBROADCASTI64X2" form="ymm, m128" xed="VBROADCASTI64X2_YMMu64_MASKmskw_MEMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" sequence="TRUE" name="_mm256_mask_broadcast_i64x2">
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m256i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <description>Broadcast the 2 packed 64-bit integers from "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ n := (j % 2)*64
+ IF k[j]
+ dst[i+63:i] := a[n+63:n]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VBROADCASTI64X2" form="ymm {k}, m128" xed="VBROADCASTI64X2_YMMu64_MASKmskw_MEMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" sequence="TRUE" name="_mm256_maskz_broadcast_i64x2">
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <description>Broadcast the 2 packed 64-bit integers from "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ n := (j % 2)*64
+ IF k[j]
+ dst[i+63:i] := a[n+63:n]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VBROADCASTI64X2" form="ymm {z}, m128" xed="VBROADCASTI64X2_YMMu64_MASKmskw_MEMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" sequence="TRUE" name="_mm512_broadcast_i64x2">
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <description>Broadcast the 2 packed 64-bit integers from "a" to all elements of "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ n := (j % 2)*64
+ dst[i+63:i] := a[n+63:n]
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VBROADCASTI64X2" form="zmm, m128" xed="VBROADCASTI64X2_ZMMu64_MASKmskw_MEMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" sequence="TRUE" name="_mm512_mask_broadcast_i64x2">
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <description>Broadcast the 2 packed 64-bit integers from "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ n := (j % 2)*64
+ IF k[j]
+ dst[i+63:i] := a[n+63:n]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VBROADCASTI64X2" form="zmm {k}, m128" xed="VBROADCASTI64X2_ZMMu64_MASKmskw_MEMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" sequence="TRUE" name="_mm512_maskz_broadcast_i64x2">
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <description>Broadcast the 2 packed 64-bit integers from "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ n := (j % 2)*64
+ IF k[j]
+ dst[i+63:i] := a[n+63:n]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VBROADCASTI64X2" form="zmm {z}, m128" xed="VBROADCASTI64X2_ZMMu64_MASKmskw_MEMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_cvtpd_epi64">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <description>Convert packed double-precision (64-bit) floating-point elements in "a" to packed 64-bit integers, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ dst[i+63:i] := Convert_FP64_To_Int64(a[i+63:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VCVTPD2QQ" form="ymm, ymm" xed="VCVTPD2QQ_YMMi64_MASKmskw_YMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cvtpd_epi64">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m256i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <description>Convert packed double-precision (64-bit) floating-point elements in "a" to packed 64-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := Convert_FP64_To_Int64(a[i+63:i])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VCVTPD2QQ" form="ymm {k}, ymm" xed="VCVTPD2QQ_YMMi64_MASKmskw_YMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_cvtpd_epi64">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <description>Convert packed double-precision (64-bit) floating-point elements in "a" to packed 64-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := Convert_FP64_To_Int64(a[i+63:i])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VCVTPD2QQ" form="ymm {z}, ymm" xed="VCVTPD2QQ_YMMi64_MASKmskw_YMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_cvt_roundpd_epi64">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Convert packed double-precision (64-bit) floating-point elements in "a" to packed 64-bit integers, and store the results in "dst".
+ [round_note]</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ dst[i+63:i] := Convert_FP64_To_Int64(a[i+63:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCVTPD2QQ" form="zmm, zmm {er}" xed="VCVTPD2QQ_ZMMi64_MASKmskw_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_cvtpd_epi64">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Convert packed double-precision (64-bit) floating-point elements in "a" to packed 64-bit integers, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ dst[i+63:i] := Convert_FP64_To_Int64(a[i+63:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCVTPD2QQ" form="zmm, zmm" xed="VCVTPD2QQ_ZMMi64_MASKmskw_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cvt_roundpd_epi64">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Convert packed double-precision (64-bit) floating-point elements in "a" to packed 64-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+ [round_note]</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := Convert_FP64_To_Int64(a[i+63:i])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCVTPD2QQ" form="zmm {k}, zmm {er}" xed="VCVTPD2QQ_ZMMi64_MASKmskw_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cvtpd_epi64">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Convert packed double-precision (64-bit) floating-point elements in "a" to packed 64-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := Convert_FP64_To_Int64(a[i+63:i])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCVTPD2QQ" form="zmm {k}, zmm" xed="VCVTPD2QQ_ZMMi64_MASKmskw_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_cvt_roundpd_epi64">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Convert packed double-precision (64-bit) floating-point elements in "a" to packed 64-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+ [round_note]</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := Convert_FP64_To_Int64(a[i+63:i])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCVTPD2QQ" form="zmm {z}, zmm {er}" xed="VCVTPD2QQ_ZMMi64_MASKmskw_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_cvtpd_epi64">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Convert packed double-precision (64-bit) floating-point elements in "a" to packed 64-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := Convert_FP64_To_Int64(a[i+63:i])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCVTPD2QQ" form="zmm {z}, zmm" xed="VCVTPD2QQ_ZMMi64_MASKmskw_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cvtpd_epi64">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Convert packed double-precision (64-bit) floating-point elements in "a" to packed 64-bit integers, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ dst[i+63:i] := Convert_FP64_To_Int64(a[i+63:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VCVTPD2QQ" form="xmm, xmm" xed="VCVTPD2QQ_XMMi64_MASKmskw_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cvtpd_epi64">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m128i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Convert packed double-precision (64-bit) floating-point elements in "a" to packed 64-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := Convert_FP64_To_Int64(a[i+63:i])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VCVTPD2QQ" form="xmm {k}, xmm" xed="VCVTPD2QQ_XMMi64_MASKmskw_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_cvtpd_epi64">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Convert packed double-precision (64-bit) floating-point elements in "a" to packed 64-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := Convert_FP64_To_Int64(a[i+63:i])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VCVTPD2QQ" form="xmm {z}, xmm" xed="VCVTPD2QQ_XMMi64_MASKmskw_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_cvtpd_epu64">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <description>Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 64-bit integers, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ dst[i+63:i] := Convert_FP64_To_UInt64(a[i+63:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VCVTPD2UQQ" form="ymm, ymm" xed="VCVTPD2UQQ_YMMu64_MASKmskw_YMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cvtpd_epu64">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m256i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <description>Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 64-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := Convert_FP64_To_UInt64(a[i+63:i])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VCVTPD2UQQ" form="ymm {k}, ymm" xed="VCVTPD2UQQ_YMMu64_MASKmskw_YMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_cvtpd_epu64">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <description>Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 64-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := Convert_FP64_To_UInt64(a[i+63:i])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VCVTPD2UQQ" form="ymm {z}, ymm" xed="VCVTPD2UQQ_YMMu64_MASKmskw_YMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_cvt_roundpd_epu64">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 64-bit integers, and store the results in "dst".
+ [round_note]</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ dst[i+63:i] := Convert_FP64_To_UInt64(a[i+63:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCVTPD2UQQ" form="zmm, zmm {er}" xed="VCVTPD2UQQ_ZMMu64_MASKmskw_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_cvtpd_epu64">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 64-bit integers, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ dst[i+63:i] := Convert_FP64_To_UInt64(a[i+63:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCVTPD2UQQ" form="zmm, zmm" xed="VCVTPD2UQQ_ZMMu64_MASKmskw_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cvt_roundpd_epu64">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 64-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+ [round_note]</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := Convert_FP64_To_UInt64(a[i+63:i])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCVTPD2UQQ" form="zmm {k}, zmm {er}" xed="VCVTPD2UQQ_ZMMu64_MASKmskw_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cvtpd_epu64">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 64-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := Convert_FP64_To_UInt64(a[i+63:i])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCVTPD2UQQ" form="zmm {k}, zmm" xed="VCVTPD2UQQ_ZMMu64_MASKmskw_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_cvt_roundpd_epu64">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 64-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+ [round_note]</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := Convert_FP64_To_UInt64(a[i+63:i])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCVTPD2UQQ" form="zmm {z}, zmm {er}" xed="VCVTPD2UQQ_ZMMu64_MASKmskw_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_cvtpd_epu64">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 64-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := Convert_FP64_To_UInt64(a[i+63:i])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCVTPD2UQQ" form="zmm {z}, zmm" xed="VCVTPD2UQQ_ZMMu64_MASKmskw_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cvtpd_epu64">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 64-bit integers, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ dst[i+63:i] := Convert_FP64_To_UInt64(a[i+63:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VCVTPD2UQQ" form="xmm, xmm" xed="VCVTPD2UQQ_XMMu64_MASKmskw_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cvtpd_epu64">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m128i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 64-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := Convert_FP64_To_UInt64(a[i+63:i])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VCVTPD2UQQ" form="xmm {k}, xmm" xed="VCVTPD2UQQ_XMMu64_MASKmskw_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_cvtpd_epu64">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 64-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := Convert_FP64_To_UInt64(a[i+63:i])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VCVTPD2UQQ" form="xmm {z}, xmm" xed="VCVTPD2UQQ_XMMu64_MASKmskw_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_cvtps_epi64">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed 64-bit integers, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ l := j*32
+ dst[i+63:i] := Convert_FP32_To_Int64(a[l+31:l])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VCVTPS2QQ" form="ymm, xmm" xed="VCVTPS2QQ_YMMi64_MASKmskw_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cvtps_epi64">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m256i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed 64-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ l := j*32
+ IF k[j]
+ dst[i+63:i] := Convert_FP32_To_Int64(a[l+31:l])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VCVTPS2QQ" form="ymm {k}, xmm" xed="VCVTPS2QQ_YMMi64_MASKmskw_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_cvtps_epi64">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed 64-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ l := j*32
+ IF k[j]
+ dst[i+63:i] := Convert_FP32_To_Int64(a[l+31:l])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VCVTPS2QQ" form="ymm {z}, xmm" xed="VCVTPS2QQ_YMMi64_MASKmskw_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_cvt_roundps_epi64">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed 64-bit integers, and store the results in "dst".
+ [round_note]</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ l := j*32
+ dst[i+63:i] := Convert_FP32_To_Int64(a[l+31:l])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCVTPS2QQ" form="zmm, ymm {er}" xed="VCVTPS2QQ_ZMMi64_MASKmskw_YMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_cvtps_epi64">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed 64-bit integers, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ l := j*32
+ dst[i+63:i] := Convert_FP32_To_Int64(a[l+31:l])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCVTPS2QQ" form="zmm, ymm" xed="VCVTPS2QQ_ZMMi64_MASKmskw_YMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cvt_roundps_epi64">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed 64-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+ [round_note]</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ l := j*32
+ IF k[j]
+ dst[i+63:i] := Convert_FP32_To_Int64(a[l+31:l])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCVTPS2QQ" form="zmm {k}, ymm {er}" xed="VCVTPS2QQ_ZMMi64_MASKmskw_YMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cvtps_epi64">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed 64-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ l := j*32
+ IF k[j]
+ dst[i+63:i] := Convert_FP32_To_Int64(a[l+31:l])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCVTPS2QQ" form="zmm {k}, ymm" xed="VCVTPS2QQ_ZMMi64_MASKmskw_YMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_cvt_roundps_epi64">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed 64-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+ [round_note]</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ l := j*32
+ IF k[j]
+ dst[i+63:i] := Convert_FP32_To_Int64(a[l+31:l])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCVTPS2QQ" form="zmm {z}, ymm {er}" xed="VCVTPS2QQ_ZMMi64_MASKmskw_YMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_cvtps_epi64">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed 64-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ l := j*32
+ IF k[j]
+ dst[i+63:i] := Convert_FP32_To_Int64(a[l+31:l])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCVTPS2QQ" form="zmm {z}, ymm" xed="VCVTPS2QQ_ZMMi64_MASKmskw_YMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cvtps_epi64">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed 64-bit integers, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ l := j*32
+ dst[i+63:i] := Convert_FP32_To_Int64(a[l+31:l])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VCVTPS2QQ" form="xmm, xmm" xed="VCVTPS2QQ_XMMi64_MASKmskw_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cvtps_epi64">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m128i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed 64-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ l := j*32
+ IF k[j]
+ dst[i+63:i] := Convert_FP32_To_Int64(a[l+31:l])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VCVTPS2QQ" form="xmm {k}, xmm" xed="VCVTPS2QQ_XMMi64_MASKmskw_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_cvtps_epi64">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed 64-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ l := j*32
+ IF k[j]
+ dst[i+63:i] := Convert_FP32_To_Int64(a[l+31:l])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VCVTPS2QQ" form="xmm {z}, xmm" xed="VCVTPS2QQ_XMMi64_MASKmskw_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_cvtps_epu64">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 64-bit integers, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ l := j*32
+ dst[i+63:i] := Convert_FP32_To_UInt64(a[l+31:l])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VCVTPS2UQQ" form="ymm, xmm" xed="VCVTPS2UQQ_YMMu64_MASKmskw_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cvtps_epu64">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m256i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 64-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ l := j*32
+ IF k[j]
+ dst[i+63:i] := Convert_FP32_To_UInt64(a[l+31:l])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VCVTPS2UQQ" form="ymm {k}, xmm" xed="VCVTPS2UQQ_YMMu64_MASKmskw_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_cvtps_epu64">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 64-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ l := j*32
+ IF k[j]
+ dst[i+63:i] := Convert_FP32_To_UInt64(a[l+31:l])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VCVTPS2UQQ" form="ymm {z}, xmm" xed="VCVTPS2UQQ_YMMu64_MASKmskw_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_cvt_roundps_epu64">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 64-bit integers, and store the results in "dst".
+ [round_note]</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ l := j*32
+ dst[i+63:i] := Convert_FP32_To_UInt64(a[l+31:l])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCVTPS2UQQ" form="zmm, ymm {er}" xed="VCVTPS2UQQ_ZMMu64_MASKmskw_YMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_cvtps_epu64">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 64-bit integers, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ l := j*32
+ dst[i+63:i] := Convert_FP32_To_UInt64(a[l+31:l])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCVTPS2UQQ" form="zmm, ymm" xed="VCVTPS2UQQ_ZMMu64_MASKmskw_YMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cvt_roundps_epu64">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 64-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+ [round_note]</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ l := j*32
+ IF k[j]
+ dst[i+63:i] := Convert_FP32_To_UInt64(a[l+31:l])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCVTPS2UQQ" form="zmm {k}, ymm {er}" xed="VCVTPS2UQQ_ZMMu64_MASKmskw_YMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cvtps_epu64">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 64-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ l := j*32
+ IF k[j]
+ dst[i+63:i] := Convert_FP32_To_UInt64(a[l+31:l])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCVTPS2UQQ" form="zmm {k}, ymm" xed="VCVTPS2UQQ_ZMMu64_MASKmskw_YMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_cvt_roundps_epu64">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 64-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+ [round_note]</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ l := j*32
+ IF k[j]
+ dst[i+63:i] := Convert_FP32_To_UInt64(a[l+31:l])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCVTPS2UQQ" form="zmm {z}, ymm {er}" xed="VCVTPS2UQQ_ZMMu64_MASKmskw_YMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_cvtps_epu64">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 64-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ l := j*32
+ IF k[j]
+ dst[i+63:i] := Convert_FP32_To_UInt64(a[l+31:l])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCVTPS2UQQ" form="zmm {z}, ymm" xed="VCVTPS2UQQ_ZMMu64_MASKmskw_YMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cvtps_epu64">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 64-bit integers, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ l := j*32
+ dst[i+63:i] := Convert_FP32_To_UInt64(a[l+31:l])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VCVTPS2UQQ" form="xmm, xmm" xed="VCVTPS2UQQ_XMMu64_MASKmskw_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cvtps_epu64">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m128i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 64-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ l := j*32
+ IF k[j]
+ dst[i+63:i] := Convert_FP32_To_UInt64(a[l+31:l])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VCVTPS2UQQ" form="xmm {k}, xmm" xed="VCVTPS2UQQ_XMMu64_MASKmskw_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_cvtps_epu64">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 64-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ l := j*32
+ IF k[j]
+ dst[i+63:i] := Convert_FP32_To_UInt64(a[l+31:l])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VCVTPS2UQQ" form="xmm {z}, xmm" xed="VCVTPS2UQQ_XMMu64_MASKmskw_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_cvtepi64_pd">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256i" varname="a" etype="SI64"/>
+ <description>Convert packed signed 64-bit integers in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ dst[i+63:i] := Convert_Int64_To_FP64(a[i+63:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VCVTQQ2PD" form="ymm, ymm" xed="VCVTQQ2PD_YMMf64_MASKmskw_YMMi64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cvtepi64_pd">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI64"/>
+ <description>Convert packed signed 64-bit integers in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := Convert_Int64_To_FP64(a[i+63:i])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VCVTQQ2PD" form="ymm {k}, ymm" xed="VCVTQQ2PD_YMMf64_MASKmskw_YMMi64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_cvtepi64_pd">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI64"/>
+ <description>Convert packed signed 64-bit integers in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := Convert_Int64_To_FP64(a[i+63:i])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VCVTQQ2PD" form="ymm {z}, ymm" xed="VCVTQQ2PD_YMMf64_MASKmskw_YMMi64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_cvt_roundepi64_pd">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512i" varname="a" etype="SI64"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Convert packed signed 64-bit integers in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst".
+ [round_note]</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ dst[i+63:i] := Convert_Int64_To_FP64(a[i+63:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCVTQQ2PD" form="zmm, zmm {er}" xed="VCVTQQ2PD_ZMMf64_MASKmskw_ZMMi64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_cvtepi64_pd">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512i" varname="a" etype="SI64"/>
+ <description>Convert packed signed 64-bit integers in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ dst[i+63:i] := Convert_Int64_To_FP64(a[i+63:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCVTQQ2PD" form="zmm, zmm" xed="VCVTQQ2PD_ZMMf64_MASKmskw_ZMMi64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cvt_roundepi64_pd">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI64"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Convert packed signed 64-bit integers in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+ [round_note]</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := Convert_Int64_To_FP64(a[i+63:i])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCVTQQ2PD" form="zmm {k}, zmm {er}" xed="VCVTQQ2PD_ZMMf64_MASKmskw_ZMMi64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cvtepi64_pd">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI64"/>
+ <description>Convert packed signed 64-bit integers in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := Convert_Int64_To_FP64(a[i+63:i])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCVTQQ2PD" form="zmm {k}, zmm" xed="VCVTQQ2PD_ZMMf64_MASKmskw_ZMMi64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_cvt_roundepi64_pd">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI64"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Convert packed signed 64-bit integers in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+ [round_note]</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := Convert_Int64_To_FP64(a[i+63:i])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCVTQQ2PD" form="zmm {z}, zmm {er}" xed="VCVTQQ2PD_ZMMf64_MASKmskw_ZMMi64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_cvtepi64_pd">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI64"/>
+ <description>Convert packed signed 64-bit integers in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := Convert_Int64_To_FP64(a[i+63:i])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCVTQQ2PD" form="zmm {z}, zmm" xed="VCVTQQ2PD_ZMMf64_MASKmskw_ZMMi64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cvtepi64_pd">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128i" varname="a" etype="SI64"/>
+ <description>Convert packed signed 64-bit integers in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ dst[i+63:i] := Convert_Int64_To_FP64(a[i+63:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VCVTQQ2PD" form="xmm, xmm" xed="VCVTQQ2PD_XMMf64_MASKmskw_XMMi64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cvtepi64_pd">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI64"/>
+ <description>Convert packed signed 64-bit integers in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := Convert_Int64_To_FP64(a[i+63:i])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VCVTQQ2PD" form="xmm {k}, xmm" xed="VCVTQQ2PD_XMMf64_MASKmskw_XMMi64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_cvtepi64_pd">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI64"/>
+ <description>Convert packed signed 64-bit integers in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := Convert_Int64_To_FP64(a[i+63:i])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VCVTQQ2PD" form="xmm {z}, xmm" xed="VCVTQQ2PD_XMMf64_MASKmskw_XMMi64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_cvtepi64_ps">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m256i" varname="a" etype="SI64"/>
+ <description>Convert packed signed 64-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ l := j*32
+ dst[l+31:l] := Convert_Int64_To_FP32(a[i+63:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VCVTQQ2PS" form="xmm, ymm" xed="VCVTQQ2PS_XMMf32_MASKmskw_YMMu64_AVX512_VL256"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cvtepi64_ps">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI64"/>
+ <description>Convert packed signed 64-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ l := j*32
+ IF k[j]
+ dst[l+31:l] := Convert_Int64_To_FP32(a[i+63:i])
+ ELSE
+ dst[l+31:l] := src[l+31:l]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VCVTQQ2PS" form="xmm {k}, ymm" xed="VCVTQQ2PS_XMMf32_MASKmskw_YMMu64_AVX512_VL256"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_cvtepi64_ps">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI64"/>
+ <description>Convert packed signed 64-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ l := j*32
+ IF k[j]
+ dst[l+31:l] := Convert_Int64_To_FP32(a[i+63:i])
+ ELSE
+ dst[l+31:l] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VCVTQQ2PS" form="xmm {z}, ymm" xed="VCVTQQ2PS_XMMf32_MASKmskw_YMMu64_AVX512_VL256"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_cvt_roundepi64_ps">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m512i" varname="a" etype="SI64"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Convert packed signed 64-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst".
+ [round_note]</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ l := j*32
+ dst[l+31:l] := Convert_Int64_To_FP32(a[i+63:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VCVTQQ2PS" form="ymm, zmm {er}" xed="VCVTQQ2PS_YMMf32_MASKmskw_ZMMu64_AVX512_VL512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_cvtepi64_ps">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m512i" varname="a" etype="SI64"/>
+ <description>Convert packed signed 64-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ l := j*32
+ dst[l+31:l] := Convert_Int64_To_FP32(a[i+63:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VCVTQQ2PS" form="ymm, zmm" xed="VCVTQQ2PS_YMMf32_MASKmskw_ZMMu64_AVX512_VL512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cvt_roundepi64_ps">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI64"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Convert packed signed 64-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+ [round_note]</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ l := j*32
+ IF k[j]
+ dst[l+31:l] := Convert_Int64_To_FP32(a[i+63:i])
+ ELSE
+ dst[l+31:l] := src[l+31:l]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VCVTQQ2PS" form="ymm {k}, zmm {er}" xed="VCVTQQ2PS_YMMf32_MASKmskw_ZMMu64_AVX512_VL512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cvtepi64_ps">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI64"/>
+ <description>Convert packed signed 64-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ l := j*32
+ IF k[j]
+ dst[l+31:l] := Convert_Int64_To_FP32(a[i+63:i])
+ ELSE
+ dst[l+31:l] := src[l+31:l]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VCVTQQ2PS" form="ymm {k}, zmm" xed="VCVTQQ2PS_YMMf32_MASKmskw_ZMMu64_AVX512_VL512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_cvt_roundepi64_ps">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI64"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Convert packed signed 64-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+ [round_note]</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ l := j*32
+ IF k[j]
+ dst[l+31:l] := Convert_Int64_To_FP32(a[i+63:i])
+ ELSE
+ dst[l+31:l] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VCVTQQ2PS" form="ymm {z}, zmm {er}" xed="VCVTQQ2PS_YMMf32_MASKmskw_ZMMu64_AVX512_VL512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_cvtepi64_ps">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI64"/>
+ <description>Convert packed signed 64-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ l := j*32
+ IF k[j]
+ dst[l+31:l] := Convert_Int64_To_FP32(a[i+63:i])
+ ELSE
+ dst[l+31:l] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VCVTQQ2PS" form="ymm {z}, zmm" xed="VCVTQQ2PS_YMMf32_MASKmskw_ZMMu64_AVX512_VL512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cvtepi64_ps">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128i" varname="a" etype="SI64"/>
+ <description>Convert packed signed 64-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ l := j*32
+ dst[l+31:l] := Convert_Int64_To_FP32(a[i+63:i])
+ENDFOR
+dst[MAX:64] := 0
+ </operation>
+ <instruction name="VCVTQQ2PS" form="xmm, xmm" xed="VCVTQQ2PS_XMMf32_MASKmskw_XMMu64_AVX512_VL128"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cvtepi64_ps">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI64"/>
+ <description>Convert packed signed 64-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ l := j*32
+ IF k[j]
+ dst[l+31:l] := Convert_Int64_To_FP32(a[i+63:i])
+ ELSE
+ dst[l+31:l] := src[l+31:l]
+ FI
+ENDFOR
+dst[MAX:64] := 0
+ </operation>
+ <instruction name="VCVTQQ2PS" form="xmm {k}, xmm" xed="VCVTQQ2PS_XMMf32_MASKmskw_XMMu64_AVX512_VL128"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_cvtepi64_ps">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI64"/>
+ <description>Convert packed signed 64-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ l := j*32
+ IF k[j]
+ dst[l+31:l] := Convert_Int64_To_FP32(a[i+63:i])
+ ELSE
+ dst[l+31:l] := 0
+ FI
+ENDFOR
+dst[MAX:64] := 0
+ </operation>
+ <instruction name="VCVTQQ2PS" form="xmm {z}, xmm" xed="VCVTQQ2PS_XMMf32_MASKmskw_XMMu64_AVX512_VL128"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_cvttpd_epi64">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m256i" varname="dst" etype="SI64"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <description>Convert packed double-precision (64-bit) floating-point elements in "a" to packed 64-bit integers with truncation, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ dst[i+63:i] := Convert_FP64_To_Int64_Truncate(a[i+63:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VCVTTPD2QQ" form="ymm, ymm" xed="VCVTTPD2QQ_YMMi64_MASKmskw_YMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cvttpd_epi64">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m256i" varname="dst" etype="SI64"/>
+ <parameter type="__m256i" varname="src" etype="SI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <description>Convert packed double-precision (64-bit) floating-point elements in "a" to packed 64-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := Convert_FP64_To_Int64_Truncate(a[i+63:i])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VCVTTPD2QQ" form="ymm {k}, ymm" xed="VCVTTPD2QQ_YMMi64_MASKmskw_YMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_cvttpd_epi64">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m256i" varname="dst" etype="SI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <description>Convert packed double-precision (64-bit) floating-point elements in "a" to packed 64-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := Convert_FP64_To_Int64_Truncate(a[i+63:i])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VCVTTPD2QQ" form="ymm {z}, ymm" xed="VCVTTPD2QQ_YMMi64_MASKmskw_YMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_cvtt_roundpd_epi64">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m512i" varname="dst" etype="SI64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Convert packed double-precision (64-bit) floating-point elements in "a" to packed 64-bit integers with truncation, and store the results in "dst". [sae_note]</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ dst[i+63:i] := Convert_FP64_To_Int64_Truncate(a[i+63:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCVTTPD2QQ" form="zmm, zmm {sae}" xed="VCVTTPD2QQ_ZMMi64_MASKmskw_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_cvttpd_epi64">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m512i" varname="dst" etype="SI64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Convert packed double-precision (64-bit) floating-point elements in "a" to packed 64-bit integers with truncation, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ dst[i+63:i] := Convert_FP64_To_Int64_Truncate(a[i+63:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCVTTPD2QQ" form="zmm, zmm" xed="VCVTTPD2QQ_ZMMi64_MASKmskw_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cvtt_roundpd_epi64">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Convert packed double-precision (64-bit) floating-point elements in "a" to packed 64-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). [sae_note]</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := Convert_FP64_To_Int64_Truncate(a[i+63:i])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCVTTPD2QQ" form="zmm {k}, zmm {sae}" xed="VCVTTPD2QQ_ZMMi64_MASKmskw_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cvttpd_epi64">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Convert packed double-precision (64-bit) floating-point elements in "a" to packed 64-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := Convert_FP64_To_Int64_Truncate(a[i+63:i])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCVTTPD2QQ" form="zmm {k}, zmm" xed="VCVTTPD2QQ_ZMMi64_MASKmskw_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_cvtt_roundpd_epi64">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Convert packed double-precision (64-bit) floating-point elements in "a" to packed 64-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). [sae_note]</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := Convert_FP64_To_Int64_Truncate(a[i+63:i])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCVTTPD2QQ" form="zmm {z}, zmm {sae}" xed="VCVTTPD2QQ_ZMMi64_MASKmskw_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_cvttpd_epi64">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Convert packed double-precision (64-bit) floating-point elements in "a" to packed 64-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := Convert_FP64_To_Int64_Truncate(a[i+63:i])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCVTTPD2QQ" form="zmm {z}, zmm" xed="VCVTTPD2QQ_ZMMi64_MASKmskw_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cvttpd_epi64">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Convert packed double-precision (64-bit) floating-point elements in "a" to packed 64-bit integers with truncation, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ dst[i+63:i] := Convert_FP64_To_Int64_Truncate(a[i+63:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VCVTTPD2QQ" form="xmm, xmm" xed="VCVTTPD2QQ_XMMi64_MASKmskw_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cvttpd_epi64">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m128i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Convert packed double-precision (64-bit) floating-point elements in "a" to packed 64-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := Convert_FP64_To_Int64_Truncate(a[i+63:i])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VCVTTPD2QQ" form="xmm {k}, xmm" xed="VCVTTPD2QQ_XMMi64_MASKmskw_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_cvttpd_epi64">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Convert packed double-precision (64-bit) floating-point elements in "a" to packed 64-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := Convert_FP64_To_Int64_Truncate(a[i+63:i])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VCVTTPD2QQ" form="xmm {z}, xmm" xed="VCVTTPD2QQ_XMMi64_MASKmskw_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_cvttpd_epu64">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <description>Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 64-bit integers with truncation, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ dst[i+63:i] := Convert_FP64_To_UInt64_Truncate(a[i+63:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VCVTTPD2UQQ" form="ymm, ymm" xed="VCVTTPD2UQQ_YMMu64_MASKmskw_YMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cvttpd_epu64">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m256i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <description>Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 64-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := Convert_FP64_To_UInt64_Truncate(a[i+63:i])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VCVTTPD2UQQ" form="ymm {k}, ymm" xed="VCVTTPD2UQQ_YMMu64_MASKmskw_YMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_cvttpd_epu64">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <description>Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 64-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := Convert_FP64_To_UInt64_Truncate(a[i+63:i])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VCVTTPD2UQQ" form="ymm {z}, ymm" xed="VCVTTPD2UQQ_YMMu64_MASKmskw_YMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_cvtt_roundpd_epu64">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 64-bit integers with truncation, and store the results in "dst". [sae_note]</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ dst[i+63:i] := Convert_FP64_To_UInt64_Truncate(a[i+63:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCVTTPD2UQQ" form="zmm, zmm {sae}" xed="VCVTTPD2UQQ_ZMMu64_MASKmskw_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_cvttpd_epu64">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 64-bit integers with truncation, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ dst[i+63:i] := Convert_FP64_To_UInt64_Truncate(a[i+63:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCVTTPD2UQQ" form="zmm, zmm" xed="VCVTTPD2UQQ_ZMMu64_MASKmskw_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cvtt_roundpd_epu64">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 64-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). [sae_note]</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := Convert_FP64_To_UInt64_Truncate(a[i+63:i])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCVTTPD2UQQ" form="zmm {k}, zmm {sae}" xed="VCVTTPD2UQQ_ZMMu64_MASKmskw_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cvttpd_epu64">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 64-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := Convert_FP64_To_UInt64_Truncate(a[i+63:i])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCVTTPD2UQQ" form="zmm {k}, zmm" xed="VCVTTPD2UQQ_ZMMu64_MASKmskw_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_cvtt_roundpd_epu64">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 64-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). [sae_note]</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := Convert_FP64_To_UInt64_Truncate(a[i+63:i])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCVTTPD2UQQ" form="zmm {z}, zmm {sae}" xed="VCVTTPD2UQQ_ZMMu64_MASKmskw_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_cvttpd_epu64">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 64-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := Convert_FP64_To_UInt64_Truncate(a[i+63:i])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCVTTPD2UQQ" form="zmm {z}, zmm" xed="VCVTTPD2UQQ_ZMMu64_MASKmskw_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cvttpd_epu64">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 64-bit integers with truncation, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ dst[i+63:i] := Convert_FP64_To_UInt64_Truncate(a[i+63:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VCVTTPD2UQQ" form="xmm, xmm" xed="VCVTTPD2UQQ_XMMu64_MASKmskw_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cvttpd_epu64">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m128i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 64-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := Convert_FP64_To_UInt64_Truncate(a[i+63:i])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VCVTTPD2UQQ" form="xmm {k}, xmm" xed="VCVTTPD2UQQ_XMMu64_MASKmskw_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_cvttpd_epu64">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 64-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := Convert_FP64_To_UInt64_Truncate(a[i+63:i])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VCVTTPD2UQQ" form="xmm {z}, xmm" xed="VCVTTPD2UQQ_XMMu64_MASKmskw_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_cvttps_epi64">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed 64-bit integers with truncation, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ l := j*32
+ dst[i+63:i] := Convert_FP32_To_Int64_Truncate(a[l+31:l])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VCVTTPS2QQ" form="ymm, xmm" xed="VCVTTPS2QQ_YMMi64_MASKmskw_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cvttps_epi64">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m256i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed 64-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ l := j*32
+ IF k[j]
+ dst[i+63:i] := Convert_FP32_To_Int64_Truncate(a[l+31:l])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VCVTTPS2QQ" form="ymm {k}, xmm" xed="VCVTTPS2QQ_YMMi64_MASKmskw_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_cvttps_epi64">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed 64-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ l := j*32
+ IF k[j]
+ dst[i+63:i] := Convert_FP32_To_Int64_Truncate(a[l+31:l])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VCVTTPS2QQ" form="ymm {z}, xmm" xed="VCVTTPS2QQ_YMMi64_MASKmskw_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_cvtt_roundps_epi64">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed 64-bit integers with truncation, and store the results in "dst". [sae_note]</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ l := j*32
+ dst[i+63:i] := Convert_FP32_To_Int64_Truncate(a[l+31:l])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCVTTPS2QQ" form="zmm, ymm {sae}" xed="VCVTTPS2QQ_ZMMi64_MASKmskw_YMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_cvttps_epi64">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed 64-bit integers with truncation, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ l := j*32
+ dst[i+63:i] := Convert_FP32_To_Int64_Truncate(a[l+31:l])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCVTTPS2QQ" form="zmm, ymm" xed="VCVTTPS2QQ_ZMMi64_MASKmskw_YMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cvtt_roundps_epi64">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed 64-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). [sae_note]</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ l := j*32
+ IF k[j]
+ dst[i+63:i] := Convert_FP32_To_Int64_Truncate(a[l+31:l])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCVTTPS2QQ" form="zmm {k}, ymm {sae}" xed="VCVTTPS2QQ_ZMMi64_MASKmskw_YMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cvttps_epi64">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed 64-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ l := j*32
+ IF k[j]
+ dst[i+63:i] := Convert_FP32_To_Int64_Truncate(a[l+31:l])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCVTTPS2QQ" form="zmm {k}, ymm" xed="VCVTTPS2QQ_ZMMi64_MASKmskw_YMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_cvtt_roundps_epi64">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed 64-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). [sae_note]</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ l := j*32
+ IF k[j]
+ dst[i+63:i] := Convert_FP32_To_Int64_Truncate(a[l+31:l])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCVTTPS2QQ" form="zmm {z}, ymm {sae}" xed="VCVTTPS2QQ_ZMMi64_MASKmskw_YMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_cvttps_epi64">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed 64-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ l := j*32
+ IF k[j]
+ dst[i+63:i] := Convert_FP32_To_Int64_Truncate(a[l+31:l])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCVTTPS2QQ" form="zmm {z}, ymm" xed="VCVTTPS2QQ_ZMMi64_MASKmskw_YMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cvttps_epi64">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed 64-bit integers with truncation, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ l := j*32
+ dst[i+63:i] := Convert_FP32_To_Int64_Truncate(a[l+31:l])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VCVTTPS2QQ" form="xmm, xmm" xed="VCVTTPS2QQ_XMMi64_MASKmskw_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cvttps_epi64">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m128i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed 64-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ l := j*32
+ IF k[j]
+ dst[i+63:i] := Convert_FP32_To_Int64_Truncate(a[l+31:l])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VCVTTPS2QQ" form="xmm {k}, xmm" xed="VCVTTPS2QQ_XMMi64_MASKmskw_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_cvttps_epi64">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed 64-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ l := j*32
+ IF k[j]
+ dst[i+63:i] := Convert_FP32_To_Int64_Truncate(a[l+31:l])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VCVTTPS2QQ" form="xmm {z}, xmm" xed="VCVTTPS2QQ_XMMi64_MASKmskw_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_cvttps_epu64">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 64-bit integers with truncation, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ l := j*32
+ dst[i+63:i] := Convert_FP32_To_UInt64_Truncate(a[l+31:l])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VCVTTPS2UQQ" form="ymm, xmm" xed="VCVTTPS2UQQ_YMMu64_MASKmskw_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cvttps_epu64">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m256i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 64-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ l := j*32
+ IF k[j]
+ dst[i+63:i] := Convert_FP32_To_UInt64_Truncate(a[l+31:l])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VCVTTPS2UQQ" form="ymm {k}, xmm" xed="VCVTTPS2UQQ_YMMu64_MASKmskw_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_cvttps_epu64">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 64-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ l := j*32
+ IF k[j]
+ dst[i+63:i] := Convert_FP32_To_UInt64_Truncate(a[l+31:l])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VCVTTPS2UQQ" form="ymm {z}, xmm" xed="VCVTTPS2UQQ_YMMu64_MASKmskw_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_cvtt_roundps_epu64">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 64-bit integers with truncation, and store the results in "dst". [sae_note]</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ l := j*32
+ dst[i+63:i] := Convert_FP32_To_UInt64_Truncate(a[l+31:l])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCVTTPS2UQQ" form="zmm, ymm {sae}" xed="VCVTTPS2UQQ_ZMMu64_MASKmskw_YMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_cvttps_epu64">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 64-bit integers with truncation, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ l := j*32
+ dst[i+63:i] := Convert_FP32_To_UInt64_Truncate(a[l+31:l])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCVTTPS2UQQ" form="zmm, ymm" xed="VCVTTPS2UQQ_ZMMu64_MASKmskw_YMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cvtt_roundps_epu64">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 64-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). [sae_note]</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ l := j*32
+ IF k[j]
+ dst[i+63:i] := Convert_FP32_To_UInt64_Truncate(a[l+31:l])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCVTTPS2UQQ" form="zmm {k}, ymm {sae}" xed="VCVTTPS2UQQ_ZMMu64_MASKmskw_YMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cvttps_epu64">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 64-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ l := j*32
+ IF k[j]
+ dst[i+63:i] := Convert_FP32_To_UInt64_Truncate(a[l+31:l])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCVTTPS2UQQ" form="zmm {k}, ymm" xed="VCVTTPS2UQQ_ZMMu64_MASKmskw_YMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_cvtt_roundps_epu64">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 64-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). [sae_note]</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ l := j*32
+ IF k[j]
+ dst[i+63:i] := Convert_FP32_To_UInt64_Truncate(a[l+31:l])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCVTTPS2UQQ" form="zmm {z}, ymm {sae}" xed="VCVTTPS2UQQ_ZMMu64_MASKmskw_YMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_cvttps_epu64">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 64-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ l := j*32
+ IF k[j]
+ dst[i+63:i] := Convert_FP32_To_UInt64_Truncate(a[l+31:l])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCVTTPS2UQQ" form="zmm {z}, ymm" xed="VCVTTPS2UQQ_ZMMu64_MASKmskw_YMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cvttps_epu64">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 64-bit integers with truncation, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ l := j*32
+ dst[i+63:i] := Convert_FP32_To_UInt64_Truncate(a[l+31:l])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VCVTTPS2UQQ" form="xmm, xmm" xed="VCVTTPS2UQQ_XMMu64_MASKmskw_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cvttps_epu64">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m128i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 64-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ l := j*32
+ IF k[j]
+ dst[i+63:i] := Convert_FP32_To_UInt64_Truncate(a[l+31:l])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VCVTTPS2UQQ" form="xmm {k}, xmm" xed="VCVTTPS2UQQ_XMMu64_MASKmskw_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_cvttps_epu64">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 64-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ l := j*32
+ IF k[j]
+ dst[i+63:i] := Convert_FP32_To_UInt64_Truncate(a[l+31:l])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VCVTTPS2UQQ" form="xmm {z}, xmm" xed="VCVTTPS2UQQ_XMMu64_MASKmskw_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_cvtepu64_pd">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <description>Convert packed unsigned 64-bit integers in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+	i := j*64
+	dst[i+63:i] := Convert_UInt64_To_FP64(a[i+63:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VCVTUQQ2PD" form="ymm, ymm" xed="VCVTUQQ2PD_YMMf64_MASKmskw_YMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cvtepu64_pd">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <description>Convert packed unsigned 64-bit integers in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+	i := j*64
+	IF k[j]
+		dst[i+63:i] := Convert_UInt64_To_FP64(a[i+63:i])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VCVTUQQ2PD" form="ymm {k}, ymm" xed="VCVTUQQ2PD_YMMf64_MASKmskw_YMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_cvtepu64_pd">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <description>Convert packed unsigned 64-bit integers in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+	i := j*64
+	IF k[j]
+		dst[i+63:i] := Convert_UInt64_To_FP64(a[i+63:i])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VCVTUQQ2PD" form="ymm {z}, ymm" xed="VCVTUQQ2PD_YMMf64_MASKmskw_YMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_cvt_roundepu64_pd">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Convert packed unsigned 64-bit integers in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst".
+ [round_note]</description>
+ <operation>
+FOR j := 0 to 7
+	i := j*64
+	dst[i+63:i] := Convert_UInt64_To_FP64(a[i+63:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCVTUQQ2PD" form="zmm, zmm {er}" xed="VCVTUQQ2PD_ZMMf64_MASKmskw_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_cvtepu64_pd">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <description>Convert packed unsigned 64-bit integers in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+	i := j*64
+	dst[i+63:i] := Convert_UInt64_To_FP64(a[i+63:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCVTUQQ2PD" form="zmm, zmm" xed="VCVTUQQ2PD_ZMMf64_MASKmskw_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cvt_roundepu64_pd">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Convert packed unsigned 64-bit integers in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+ [round_note]</description>
+ <operation>
+FOR j := 0 to 7
+	i := j*64
+	IF k[j]
+		dst[i+63:i] := Convert_UInt64_To_FP64(a[i+63:i])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCVTUQQ2PD" form="zmm {k}, zmm {er}" xed="VCVTUQQ2PD_ZMMf64_MASKmskw_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cvtepu64_pd">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <description>Convert packed unsigned 64-bit integers in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+	i := j*64
+	IF k[j]
+		dst[i+63:i] := Convert_UInt64_To_FP64(a[i+63:i])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCVTUQQ2PD" form="zmm {k}, zmm" xed="VCVTUQQ2PD_ZMMf64_MASKmskw_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_cvt_roundepu64_pd">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Convert packed unsigned 64-bit integers in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+ [round_note]</description>
+ <operation>
+FOR j := 0 to 7
+	i := j*64
+	IF k[j]
+		dst[i+63:i] := Convert_UInt64_To_FP64(a[i+63:i])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCVTUQQ2PD" form="zmm {z}, zmm {er}" xed="VCVTUQQ2PD_ZMMf64_MASKmskw_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_cvtepu64_pd">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <description>Convert packed unsigned 64-bit integers in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+	i := j*64
+	IF k[j]
+		dst[i+63:i] := Convert_UInt64_To_FP64(a[i+63:i])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCVTUQQ2PD" form="zmm {z}, zmm" xed="VCVTUQQ2PD_ZMMf64_MASKmskw_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cvtepu64_pd">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <description>Convert packed unsigned 64-bit integers in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+	i := j*64
+	dst[i+63:i] := Convert_UInt64_To_FP64(a[i+63:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VCVTUQQ2PD" form="xmm, xmm" xed="VCVTUQQ2PD_XMMf64_MASKmskw_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cvtepu64_pd">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <description>Convert packed unsigned 64-bit integers in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+	i := j*64
+	IF k[j]
+		dst[i+63:i] := Convert_UInt64_To_FP64(a[i+63:i])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VCVTUQQ2PD" form="xmm {k}, xmm" xed="VCVTUQQ2PD_XMMf64_MASKmskw_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_cvtepu64_pd">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <description>Convert packed unsigned 64-bit integers in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+	i := j*64
+	IF k[j]
+		dst[i+63:i] := Convert_UInt64_To_FP64(a[i+63:i])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VCVTUQQ2PD" form="xmm {z}, xmm" xed="VCVTUQQ2PD_XMMf64_MASKmskw_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_cvtepu64_ps">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <description>Convert packed unsigned 64-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+	i := j*64
+	l := j*32
+	dst[l+31:l] := Convert_UInt64_To_FP32(a[i+63:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VCVTUQQ2PS" form="xmm, ymm" xed="VCVTUQQ2PS_XMMf32_MASKmskw_YMMu64_AVX512_VL256"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cvtepu64_ps">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <description>Convert packed unsigned 64-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+	i := j*64
+	l := j*32
+	IF k[j]
+		dst[l+31:l] := Convert_UInt64_To_FP32(a[i+63:i])
+ ELSE
+ dst[l+31:l] := src[l+31:l]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VCVTUQQ2PS" form="xmm {k}, ymm" xed="VCVTUQQ2PS_XMMf32_MASKmskw_YMMu64_AVX512_VL256"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_cvtepu64_ps">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <description>Convert packed unsigned 64-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+	i := j*64
+	l := j*32
+	IF k[j]
+		dst[l+31:l] := Convert_UInt64_To_FP32(a[i+63:i])
+ ELSE
+ dst[l+31:l] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VCVTUQQ2PS" form="xmm {z}, ymm" xed="VCVTUQQ2PS_XMMf32_MASKmskw_YMMu64_AVX512_VL256"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_cvt_roundepu64_ps">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Convert packed unsigned 64-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst".
+ [round_note]</description>
+ <operation>
+FOR j := 0 to 7
+	i := j*64
+	l := j*32
+	dst[l+31:l] := Convert_UInt64_To_FP32(a[i+63:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VCVTUQQ2PS" form="ymm, zmm {er}" xed="VCVTUQQ2PS_YMMf32_MASKmskw_ZMMu64_AVX512_VL512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_cvtepu64_ps">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <description>Convert packed unsigned 64-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+	i := j*64
+	l := j*32
+	dst[l+31:l] := Convert_UInt64_To_FP32(a[i+63:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VCVTUQQ2PS" form="ymm, zmm" xed="VCVTUQQ2PS_YMMf32_MASKmskw_ZMMu64_AVX512_VL512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cvt_roundepu64_ps">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Convert packed unsigned 64-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+ [round_note]</description>
+ <operation>
+FOR j := 0 to 7
+	i := j*64
+	l := j*32
+	IF k[j]
+		dst[l+31:l] := Convert_UInt64_To_FP32(a[i+63:i])
+ ELSE
+ dst[l+31:l] := src[l+31:l]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VCVTUQQ2PS" form="ymm {k}, zmm {er}" xed="VCVTUQQ2PS_YMMf32_MASKmskw_ZMMu64_AVX512_VL512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cvtepu64_ps">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <description>Convert packed unsigned 64-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+	i := j*64
+	l := j*32
+	IF k[j]
+		dst[l+31:l] := Convert_UInt64_To_FP32(a[i+63:i])
+ ELSE
+ dst[l+31:l] := src[l+31:l]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VCVTUQQ2PS" form="ymm {k}, zmm" xed="VCVTUQQ2PS_YMMf32_MASKmskw_ZMMu64_AVX512_VL512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_cvt_roundepu64_ps">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Convert packed unsigned 64-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+ [round_note]</description>
+ <operation>
+FOR j := 0 to 7
+	i := j*64
+	l := j*32
+	IF k[j]
+		dst[l+31:l] := Convert_UInt64_To_FP32(a[i+63:i])
+ ELSE
+ dst[l+31:l] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VCVTUQQ2PS" form="ymm {z}, zmm {er}" xed="VCVTUQQ2PS_YMMf32_MASKmskw_ZMMu64_AVX512_VL512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_cvtepu64_ps">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <description>Convert packed unsigned 64-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+	i := j*64
+	l := j*32
+	IF k[j]
+		dst[l+31:l] := Convert_UInt64_To_FP32(a[i+63:i])
+ ELSE
+ dst[l+31:l] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VCVTUQQ2PS" form="ymm {z}, zmm" xed="VCVTUQQ2PS_YMMf32_MASKmskw_ZMMu64_AVX512_VL512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cvtepu64_ps">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <description>Convert packed unsigned 64-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+	i := j*64
+	l := j*32
+	dst[l+31:l] := Convert_UInt64_To_FP32(a[i+63:i])
+ENDFOR
+dst[MAX:64] := 0
+ </operation>
+ <instruction name="VCVTUQQ2PS" form="xmm, xmm" xed="VCVTUQQ2PS_XMMf32_MASKmskw_XMMu64_AVX512_VL128"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cvtepu64_ps">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <description>Convert packed unsigned 64-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+	i := j*64
+	l := j*32
+	IF k[j]
+		dst[l+31:l] := Convert_UInt64_To_FP32(a[i+63:i])
+ ELSE
+ dst[l+31:l] := src[l+31:l]
+ FI
+ENDFOR
+dst[MAX:64] := 0
+ </operation>
+ <instruction name="VCVTUQQ2PS" form="xmm {k}, xmm" xed="VCVTUQQ2PS_XMMf32_MASKmskw_XMMu64_AVX512_VL128"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_cvtepu64_ps">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Convert</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <description>Convert packed unsigned 64-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+	i := j*64
+	l := j*32
+	IF k[j]
+		dst[l+31:l] := Convert_UInt64_To_FP32(a[i+63:i])
+ ELSE
+ dst[l+31:l] := 0
+ FI
+ENDFOR
+dst[MAX:64] := 0
+ </operation>
+ <instruction name="VCVTUQQ2PS" form="xmm {z}, xmm" xed="VCVTUQQ2PS_XMMf32_MASKmskw_XMMu64_AVX512_VL128"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_extractf32x8_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="1"/>
+ <description>Extract 256 bits (composed of 8 packed single-precision (32-bit) floating-point elements) from "a", selected with "imm8", and store the result in "dst".</description>
+ <operation>
+CASE imm8[0] OF
+0: dst[255:0] := a[255:0]
+1: dst[255:0] := a[511:256]
+ESAC
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VEXTRACTF32X8" form="ymm, zmm, imm8" xed="VEXTRACTF32X8_YMMf32_MASKmskw_ZMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_extractf32x8_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="1"/>
+ <description>Extract 256 bits (composed of 8 packed single-precision (32-bit) floating-point elements) from "a", selected with "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+CASE imm8[0] OF
+0: tmp[255:0] := a[255:0]
+1: tmp[255:0] := a[511:256]
+ESAC
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := tmp[i+31:i]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VEXTRACTF32X8" form="ymm {k}, zmm, imm8" xed="VEXTRACTF32X8_YMMf32_MASKmskw_ZMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_extractf32x8_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="1"/>
+ <description>Extract 256 bits (composed of 8 packed single-precision (32-bit) floating-point elements) from "a", selected with "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+CASE imm8[0] OF
+0: tmp[255:0] := a[255:0]
+1: tmp[255:0] := a[511:256]
+ESAC
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := tmp[i+31:i]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VEXTRACTF32X8" form="ymm {z}, zmm, imm8" xed="VEXTRACTF32X8_YMMf32_MASKmskw_ZMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_extractf64x2_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="1"/>
+ <description>Extract 128 bits (composed of 2 packed double-precision (64-bit) floating-point elements) from "a", selected with "imm8", and store the result in "dst".</description>
+ <operation>
+CASE imm8[0] OF
+0: dst[127:0] := a[127:0]
+1: dst[127:0] := a[255:128]
+ESAC
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VEXTRACTF64X2" form="xmm, ymm, imm8" xed="VEXTRACTF64X2_XMMf64_MASKmskw_YMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_extractf64x2_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="1"/>
+ <description>Extract 128 bits (composed of 2 packed double-precision (64-bit) floating-point elements) from "a", selected with "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+CASE imm8[0] OF
+0: tmp[127:0] := a[127:0]
+1: tmp[127:0] := a[255:128]
+ESAC
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := tmp[i+63:i]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VEXTRACTF64X2" form="xmm {k}, ymm, imm8" xed="VEXTRACTF64X2_XMMf64_MASKmskw_YMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_extractf64x2_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="1"/>
+ <description>Extract 128 bits (composed of 2 packed double-precision (64-bit) floating-point elements) from "a", selected with "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+CASE imm8[0] OF
+0: tmp[127:0] := a[127:0]
+1: tmp[127:0] := a[255:128]
+ESAC
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := tmp[i+63:i]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VEXTRACTF64X2" form="xmm {z}, ymm, imm8" xed="VEXTRACTF64X2_XMMf64_MASKmskw_YMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_extractf64x2_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="2"/>
+ <description>Extract 128 bits (composed of 2 packed double-precision (64-bit) floating-point elements) from "a", selected with "imm8", and store the result in "dst".</description>
+ <operation>
+CASE imm8[1:0] OF
+0: dst[127:0] := a[127:0]
+1: dst[127:0] := a[255:128]
+2: dst[127:0] := a[383:256]
+3: dst[127:0] := a[511:384]
+ESAC
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VEXTRACTF64X2" form="xmm, zmm, imm8" xed="VEXTRACTF64X2_XMMf64_MASKmskw_ZMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_extractf64x2_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="2"/>
+ <description>Extract 128 bits (composed of 2 packed double-precision (64-bit) floating-point elements) from "a", selected with "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+CASE imm8[1:0] OF
+0: tmp[127:0] := a[127:0]
+1: tmp[127:0] := a[255:128]
+2: tmp[127:0] := a[383:256]
+3: tmp[127:0] := a[511:384]
+ESAC
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := tmp[i+63:i]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VEXTRACTF64X2" form="xmm {k}, zmm, imm8" xed="VEXTRACTF64X2_XMMf64_MASKmskw_ZMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_extractf64x2_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="2"/>
+ <description>Extract 128 bits (composed of 2 packed double-precision (64-bit) floating-point elements) from "a", selected with "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+CASE imm8[1:0] OF
+0: tmp[127:0] := a[127:0]
+1: tmp[127:0] := a[255:128]
+2: tmp[127:0] := a[383:256]
+3: tmp[127:0] := a[511:384]
+ESAC
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := tmp[i+63:i]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VEXTRACTF64X2" form="xmm {z}, zmm, imm8" xed="VEXTRACTF64X2_XMMf64_MASKmskw_ZMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_extracti32x8_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="1"/>
+ <description>Extract 256 bits (composed of 8 packed 32-bit integers) from "a", selected with "imm8", and store the result in "dst".</description>
+ <operation>
+CASE imm8[0] OF
+0: dst[255:0] := a[255:0]
+1: dst[255:0] := a[511:256]
+ESAC
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VEXTRACTI32X8" form="ymm, zmm, imm8" xed="VEXTRACTI32X8_YMMu32_MASKmskw_ZMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_extracti32x8_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="1"/>
+ <description>Extract 256 bits (composed of 8 packed 32-bit integers) from "a", selected with "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+CASE imm8[0] OF
+0: tmp[255:0] := a[255:0]
+1: tmp[255:0] := a[511:256]
+ESAC
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := tmp[i+31:i]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VEXTRACTI32X8" form="ymm {k}, zmm, imm8" xed="VEXTRACTI32X8_YMMu32_MASKmskw_ZMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_extracti32x8_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="1"/>
+ <description>Extract 256 bits (composed of 8 packed 32-bit integers) from "a", selected with "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+CASE imm8[0] OF
+0: tmp[255:0] := a[255:0]
+1: tmp[255:0] := a[511:256]
+ESAC
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := tmp[i+31:i]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VEXTRACTI32X8" form="ymm {z}, zmm, imm8" xed="VEXTRACTI32X8_YMMu32_MASKmskw_ZMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_extracti64x2_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="1"/>
+ <description>Extract 128 bits (composed of 2 packed 64-bit integers) from "a", selected with "imm8", and store the result in "dst".</description>
+ <operation>
+CASE imm8[0] OF
+0: dst[127:0] := a[127:0]
+1: dst[127:0] := a[255:128]
+ESAC
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VEXTRACTI64X2" form="xmm, ymm, imm8" xed="VEXTRACTI64X2_XMMu64_MASKmskw_YMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_extracti64x2_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m128i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="1"/>
+ <description>Extract 128 bits (composed of 2 packed 64-bit integers) from "a", selected with "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+CASE imm8[0] OF
+0: tmp[127:0] := a[127:0]
+1: tmp[127:0] := a[255:128]
+ESAC
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := tmp[i+63:i]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VEXTRACTI64X2" form="xmm {k}, ymm, imm8" xed="VEXTRACTI64X2_XMMu64_MASKmskw_YMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_extracti64x2_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="1"/>
+ <description>Extract 128 bits (composed of 2 packed 64-bit integers) from "a", selected with "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+CASE imm8[0] OF
+0: tmp[127:0] := a[127:0]
+1: tmp[127:0] := a[255:128]
+ESAC
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := tmp[i+63:i]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VEXTRACTI64X2" form="xmm {z}, ymm, imm8" xed="VEXTRACTI64X2_XMMu64_MASKmskw_YMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_extracti64x2_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="2"/>
+ <description>Extract 128 bits (composed of 2 packed 64-bit integers) from "a", selected with "imm8", and store the result in "dst".</description>
+ <operation>
+CASE imm8[1:0] OF
+0: dst[127:0] := a[127:0]
+1: dst[127:0] := a[255:128]
+2: dst[127:0] := a[383:256]
+3: dst[127:0] := a[511:384]
+ESAC
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VEXTRACTI64X2" form="xmm, zmm, imm8" xed="VEXTRACTI64X2_XMMu64_MASKmskw_ZMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_extracti64x2_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m128i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="2"/>
+ <description>Extract 128 bits (composed of 2 packed 64-bit integers) from "a", selected with "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+CASE imm8[1:0] OF
+0: tmp[127:0] := a[127:0]
+1: tmp[127:0] := a[255:128]
+2: tmp[127:0] := a[383:256]
+3: tmp[127:0] := a[511:384]
+ESAC
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := tmp[i+63:i]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VEXTRACTI64X2" form="xmm {k}, zmm, imm8" xed="VEXTRACTI64X2_XMMu64_MASKmskw_ZMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_extracti64x2_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="2"/>
+ <description>Extract 128 bits (composed of 2 packed 64-bit integers) from "a", selected with "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+CASE imm8[1:0] OF
+0: tmp[127:0] := a[127:0]
+1: tmp[127:0] := a[255:128]
+2: tmp[127:0] := a[383:256]
+3: tmp[127:0] := a[511:384]
+ESAC
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := tmp[i+63:i]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VEXTRACTI64X2" form="xmm {z}, zmm, imm8" xed="VEXTRACTI64X2_XMMu64_MASKmskw_ZMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_fpclass_pd_mask">
+ <type>Floating Point</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Test packed double-precision (64-bit) floating-point elements in "a" for special categories specified by "imm8", and store the results in mask vector "k".
+ [fpclass_note]</description>
+ <operation>FOR j := 0 to 3
+ i := j*64
+ k[j] := CheckFPClass_FP64(a[i+63:i], imm8[7:0])
+ENDFOR
+k[MAX:4] := 0
+ </operation>
+ <instruction name="VFPCLASSPD" form="k, ymm, imm8" xed="VFPCLASSPD_MASKmskw_MASKmskw_YMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_fpclass_pd_mask">
+ <type>Floating Point</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__mmask8" varname="k1" etype="MASK"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Test packed double-precision (64-bit) floating-point elements in "a" for special categories specified by "imm8", and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).
+ [fpclass_note]</description>
+ <operation>FOR j := 0 to 3
+ i := j*64
+ IF k1[j]
+ k[j] := CheckFPClass_FP64(a[i+63:i], imm8[7:0])
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:4] := 0
+ </operation>
+ <instruction name="VFPCLASSPD" form="k {k}, ymm, imm8" xed="VFPCLASSPD_MASKmskw_MASKmskw_YMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_fpclass_pd_mask">
+ <type>Floating Point</type>
+ <type>Mask</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Test packed double-precision (64-bit) floating-point elements in "a" for special categories specified by "imm8", and store the results in mask vector "k".
+ [fpclass_note]</description>
+ <operation>FOR j := 0 to 7
+ i := j*64
+ k[j] := CheckFPClass_FP64(a[i+63:i], imm8[7:0])
+ENDFOR
+k[MAX:8] := 0
+ </operation>
+ <instruction name="VFPCLASSPD" form="k, zmm, imm8" xed="VFPCLASSPD_MASKmskw_MASKmskw_ZMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_fpclass_pd_mask">
+ <type>Floating Point</type>
+ <type>Mask</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__mmask8" varname="k1" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Test packed double-precision (64-bit) floating-point elements in "a" for special categories specified by "imm8", and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).
+ [fpclass_note]</description>
+ <operation>FOR j := 0 to 7
+ i := j*64
+ IF k1[j]
+ k[j] := CheckFPClass_FP64(a[i+63:i], imm8[7:0])
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:8] := 0
+ </operation>
+ <instruction name="VFPCLASSPD" form="k {k}, zmm, imm8" xed="VFPCLASSPD_MASKmskw_MASKmskw_ZMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_fpclass_pd_mask">
+ <type>Floating Point</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Test packed double-precision (64-bit) floating-point elements in "a" for special categories specified by "imm8", and store the results in mask vector "k".
+ [fpclass_note]</description>
+ <operation>FOR j := 0 to 1
+ i := j*64
+ k[j] := CheckFPClass_FP64(a[i+63:i], imm8[7:0])
+ENDFOR
+k[MAX:2] := 0
+ </operation>
+ <instruction name="VFPCLASSPD" form="k, xmm, imm8" xed="VFPCLASSPD_MASKmskw_MASKmskw_XMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_fpclass_pd_mask">
+ <type>Floating Point</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__mmask8" varname="k1" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Test packed double-precision (64-bit) floating-point elements in "a" for special categories specified by "imm8", and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).
+ [fpclass_note]</description>
+ <operation>FOR j := 0 to 1
+ i := j*64
+ IF k1[j]
+ k[j] := CheckFPClass_FP64(a[i+63:i], imm8[7:0])
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:2] := 0
+ </operation>
+ <instruction name="VFPCLASSPD" form="k {k}, xmm, imm8" xed="VFPCLASSPD_MASKmskw_MASKmskw_XMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_fpclass_ps_mask">
+ <type>Floating Point</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Test packed single-precision (32-bit) floating-point elements in "a" for special categories specified by "imm8", and store the results in mask vector "k".
+ [fpclass_note]</description>
+ <operation>FOR j := 0 to 7
+ i := j*32
+ k[j] := CheckFPClass_FP32(a[i+31:i], imm8[7:0])
+ENDFOR
+k[MAX:8] := 0
+ </operation>
+ <instruction name="VFPCLASSPS" form="k, ymm, imm8" xed="VFPCLASSPS_MASKmskw_MASKmskw_YMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_fpclass_ps_mask">
+ <type>Floating Point</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__mmask8" varname="k1" etype="MASK"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Test packed single-precision (32-bit) floating-point elements in "a" for special categories specified by "imm8", and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).
+ [fpclass_note]</description>
+ <operation>FOR j := 0 to 7
+ i := j*32
+ IF k1[j]
+ k[j] := CheckFPClass_FP32(a[i+31:i], imm8[7:0])
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:8] := 0
+ </operation>
+ <instruction name="VFPCLASSPS" form="k {k}, ymm, imm8" xed="VFPCLASSPS_MASKmskw_MASKmskw_YMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_fpclass_ps_mask">
+ <type>Floating Point</type>
+ <type>Mask</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Test packed single-precision (32-bit) floating-point elements in "a" for special categories specified by "imm8", and store the results in mask vector "k".
+ [fpclass_note]</description>
+ <operation>FOR j := 0 to 15
+ i := j*32
+ k[j] := CheckFPClass_FP32(a[i+31:i], imm8[7:0])
+ENDFOR
+k[MAX:16] := 0
+ </operation>
+ <instruction name="VFPCLASSPS" form="k, zmm, imm8" xed="VFPCLASSPS_MASKmskw_MASKmskw_ZMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_fpclass_ps_mask">
+ <type>Floating Point</type>
+ <type>Mask</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__mmask16" varname="k1" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Test packed single-precision (32-bit) floating-point elements in "a" for special categories specified by "imm8", and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).
+ [fpclass_note]</description>
+ <operation>FOR j := 0 to 15
+ i := j*32
+ IF k1[j]
+ k[j] := CheckFPClass_FP32(a[i+31:i], imm8[7:0])
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:16] := 0
+ </operation>
+ <instruction name="VFPCLASSPS" form="k {k}, zmm, imm8" xed="VFPCLASSPS_MASKmskw_MASKmskw_ZMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_fpclass_ps_mask">
+ <type>Floating Point</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Test packed single-precision (32-bit) floating-point elements in "a" for special categories specified by "imm8", and store the results in mask vector "k".
+ [fpclass_note]</description>
+ <operation>FOR j := 0 to 3
+ i := j*32
+ k[j] := CheckFPClass_FP32(a[i+31:i], imm8[7:0])
+ENDFOR
+k[MAX:4] := 0
+ </operation>
+ <instruction name="VFPCLASSPS" form="k, xmm, imm8" xed="VFPCLASSPS_MASKmskw_MASKmskw_XMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_fpclass_ps_mask">
+ <type>Floating Point</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__mmask8" varname="k1" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Test packed single-precision (32-bit) floating-point elements in "a" for special categories specified by "imm8", and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).
+ [fpclass_note]</description>
+ <operation>FOR j := 0 to 3
+ i := j*32
+ IF k1[j]
+ k[j] := CheckFPClass_FP32(a[i+31:i], imm8[7:0])
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:4] := 0
+ </operation>
+ <instruction name="VFPCLASSPS" form="k {k}, xmm, imm8" xed="VFPCLASSPS_MASKmskw_MASKmskw_XMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_fpclass_sd_mask">
+ <type>Floating Point</type>
+ <type>Mask</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Test the lower double-precision (64-bit) floating-point element in "a" for special categories specified by "imm8", and store the result in mask vector "k".
+ [fpclass_note]</description>
+ <operation>k[0] := CheckFPClass_FP64(a[63:0], imm8[7:0])
+k[MAX:1] := 0
+ </operation>
+ <instruction name="VFPCLASSSD" form="k, xmm, imm8" xed="VFPCLASSSD_MASKmskw_MASKmskw_XMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_fpclass_sd_mask">
+ <type>Floating Point</type>
+ <type>Mask</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__mmask8" varname="k1" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Test the lower double-precision (64-bit) floating-point element in "a" for special categories specified by "imm8", and store the result in mask vector "k" using zeromask "k1" (the element is zeroed out when mask bit 0 is not set).
+ [fpclass_note]</description>
+ <operation>IF k1[0]
+ k[0] := CheckFPClass_FP64(a[63:0], imm8[7:0])
+ELSE
+ k[0] := 0
+FI
+k[MAX:1] := 0
+ </operation>
+ <instruction name="VFPCLASSSD" form="k {k}, xmm, imm8" xed="VFPCLASSSD_MASKmskw_MASKmskw_XMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_fpclass_ss_mask">
+ <type>Floating Point</type>
+ <type>Mask</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Test the lower single-precision (32-bit) floating-point element in "a" for special categories specified by "imm8", and store the result in mask vector "k".
+ [fpclass_note]</description>
+ <operation>k[0] := CheckFPClass_FP32(a[31:0], imm8[7:0])
+k[MAX:1] := 0
+ </operation>
+ <instruction name="VFPCLASSSS" form="k, xmm, imm8" xed="VFPCLASSSS_MASKmskw_MASKmskw_XMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_fpclass_ss_mask">
+ <type>Floating Point</type>
+ <type>Mask</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__mmask8" varname="k1" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Test the lower single-precision (32-bit) floating-point element in "a" for special categories specified by "imm8", and store the result in mask vector "k" using zeromask "k1" (the element is zeroed out when mask bit 0 is not set).
+ [fpclass_note]</description>
+ <operation>IF k1[0]
+ k[0] := CheckFPClass_FP32(a[31:0], imm8[7:0])
+ELSE
+ k[0] := 0
+FI
+k[MAX:1] := 0
+ </operation>
+ <instruction name="VFPCLASSSS" form="k {k}, xmm, imm8" xed="VFPCLASSSS_MASKmskw_MASKmskw_XMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_insertf32x8">
+ <type>Floating Point</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m256" varname="b" etype="FP32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="1"/>
+ <description>Copy "a" to "dst", then insert 256 bits (composed of 8 packed single-precision (32-bit) floating-point elements) from "b" into "dst" at the location specified by "imm8".</description>
+ <operation>
+dst[511:0] := a[511:0]
+CASE (imm8[0]) OF
+0: dst[255:0] := b[255:0]
+1: dst[511:256] := b[255:0]
+ESAC
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VINSERTF32X8" form="zmm, zmm, ymm, imm8" xed="VINSERTF32X8_ZMMf32_MASKmskw_ZMMf32_YMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_insertf32x8">
+ <type>Floating Point</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m256" varname="b" etype="FP32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="1"/>
+ <description>Copy "a" to "tmp", then insert 256 bits (composed of 8 packed single-precision (32-bit) floating-point elements) from "b" into "tmp" at the location specified by "imm8". Store "tmp" to "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+tmp[511:0] := a[511:0]
+CASE (imm8[0]) OF
+0: tmp[255:0] := b[255:0]
+1: tmp[511:256] := b[255:0]
+ESAC
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := tmp[i+31:i]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VINSERTF32X8" form="zmm {k}, zmm, ymm, imm8" xed="VINSERTF32X8_ZMMf32_MASKmskw_ZMMf32_YMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_insertf32x8">
+ <type>Floating Point</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m256" varname="b" etype="FP32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="1"/>
+ <description>Copy "a" to "tmp", then insert 256 bits (composed of 8 packed single-precision (32-bit) floating-point elements) from "b" into "tmp" at the location specified by "imm8". Store "tmp" to "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+tmp[511:0] := a[511:0]
+CASE (imm8[0]) OF
+0: tmp[255:0] := b[255:0]
+1: tmp[511:256] := b[255:0]
+ESAC
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := tmp[i+31:i]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VINSERTF32X8" form="zmm {z}, zmm, ymm, imm8" xed="VINSERTF32X8_ZMMf32_MASKmskw_ZMMf32_YMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_insertf64x2">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="1"/>
+ <description>Copy "a" to "dst", then insert 128 bits (composed of 2 packed double-precision (64-bit) floating-point elements) from "b" into "dst" at the location specified by "imm8".</description>
+ <operation>
+dst[255:0] := a[255:0]
+CASE imm8[0] OF
+0: dst[127:0] := b[127:0]
+1: dst[255:128] := b[127:0]
+ESAC
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VINSERTF64X2" form="ymm, ymm, xmm, imm8" xed="VINSERTF64X2_YMMf64_MASKmskw_YMMf64_XMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_insertf64x2">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="1"/>
+ <description>Copy "a" to "tmp", then insert 128 bits (composed of 2 packed double-precision (64-bit) floating-point elements) from "b" into "tmp" at the location specified by "imm8". Store "tmp" to "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+tmp[255:0] := a[255:0]
+CASE (imm8[0]) OF
+0: tmp[127:0] := b[127:0]
+1: tmp[255:128] := b[127:0]
+ESAC
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := tmp[i+63:i]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VINSERTF64X2" form="ymm {k}, ymm, xmm, imm8" xed="VINSERTF64X2_YMMf64_MASKmskw_YMMf64_XMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_insertf64x2">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="1"/>
+ <description>Copy "a" to "tmp", then insert 128 bits (composed of 2 packed double-precision (64-bit) floating-point elements) from "b" into "tmp" at the location specified by "imm8". Store "tmp" to "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+tmp[255:0] := a[255:0]
+CASE (imm8[0]) OF
+0: tmp[127:0] := b[127:0]
+1: tmp[255:128] := b[127:0]
+ESAC
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := tmp[i+63:i]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VINSERTF64X2" form="ymm {z}, ymm, xmm, imm8" xed="VINSERTF64X2_YMMf64_MASKmskw_YMMf64_XMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_insertf64x2">
+ <type>Floating Point</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="2"/>
+ <description>Copy "a" to "dst", then insert 128 bits (composed of 2 packed double-precision (64-bit) floating-point elements) from "b" into "dst" at the location specified by "imm8".</description>
+ <operation>
+dst[511:0] := a[511:0]
+CASE imm8[1:0] OF
+0: dst[127:0] := b[127:0]
+1: dst[255:128] := b[127:0]
+2: dst[383:256] := b[127:0]
+3: dst[511:384] := b[127:0]
+ESAC
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VINSERTF64X2" form="zmm, zmm, xmm, imm8" xed="VINSERTF64X2_ZMMf64_MASKmskw_ZMMf64_XMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_insertf64x2">
+ <type>Floating Point</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="2"/>
+ <description>Copy "a" to "tmp", then insert 128 bits (composed of 2 packed double-precision (64-bit) floating-point elements) from "b" into "tmp" at the location specified by "imm8". Store "tmp" to "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+tmp[511:0] := a[511:0]
+CASE (imm8[1:0]) OF
+0: tmp[127:0] := b[127:0]
+1: tmp[255:128] := b[127:0]
+2: tmp[383:256] := b[127:0]
+3: tmp[511:384] := b[127:0]
+ESAC
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := tmp[i+63:i]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VINSERTF64X2" form="zmm {k}, zmm, xmm, imm8" xed="VINSERTF64X2_ZMMf64_MASKmskw_ZMMf64_XMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_insertf64x2">
+ <type>Floating Point</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="2"/>
+ <description>Copy "a" to "tmp", then insert 128 bits (composed of 2 packed double-precision (64-bit) floating-point elements) from "b" into "tmp" at the location specified by "imm8". Store "tmp" to "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+tmp[511:0] := a[511:0]
+CASE (imm8[1:0]) OF
+0: tmp[127:0] := b[127:0]
+1: tmp[255:128] := b[127:0]
+2: tmp[383:256] := b[127:0]
+3: tmp[511:384] := b[127:0]
+ESAC
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := tmp[i+63:i]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VINSERTF64X2" form="zmm {z}, zmm, xmm, imm8" xed="VINSERTF64X2_ZMMf64_MASKmskw_ZMMf64_XMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_inserti32x8">
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="__m256i" varname="b" etype="UI32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="1"/>
+ <description>Copy "a" to "dst", then insert 256 bits (composed of 8 packed 32-bit integers) from "b" into "dst" at the location specified by "imm8".</description>
+ <operation>
+dst[511:0] := a[511:0]
+CASE imm8[0] OF
+0: dst[255:0] := b[255:0]
+1: dst[511:256] := b[255:0]
+ESAC
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VINSERTI32X8" form="zmm, zmm, ymm, imm8" xed="VINSERTI32X8_ZMMu32_MASKmskw_ZMMu32_YMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_inserti32x8">
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="src" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="__m256i" varname="b" etype="UI32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="1"/>
+ <description>Copy "a" to "tmp", then insert 256 bits (composed of 8 packed 32-bit integers) from "b" into "tmp" at the location specified by "imm8". Store "tmp" to "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+tmp[511:0] := a[511:0]
+CASE (imm8[0]) OF
+0: tmp[255:0] := b[255:0]
+1: tmp[511:256] := b[255:0]
+ESAC
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := tmp[i+31:i]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VINSERTI32X8" form="zmm {k}, zmm, ymm, imm8" xed="VINSERTI32X8_ZMMu32_MASKmskw_ZMMu32_YMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_inserti32x8">
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="__m256i" varname="b" etype="UI32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="1"/>
+ <description>Copy "a" to "tmp", then insert 256 bits (composed of 8 packed 32-bit integers) from "b" into "tmp" at the location specified by "imm8". Store "tmp" to "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+tmp[511:0] := a[511:0]
+CASE (imm8[0]) OF
+0: tmp[255:0] := b[255:0]
+1: tmp[511:256] := b[255:0]
+ESAC
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := tmp[i+31:i]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VINSERTI32X8" form="zmm {z}, zmm, ymm, imm8" xed="VINSERTI32X8_ZMMu32_MASKmskw_ZMMu32_YMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_inserti64x2">
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="__m128i" varname="b" etype="UI64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="1"/>
+ <description>Copy "a" to "dst", then insert 128 bits (composed of 2 packed 64-bit integers) from "b" into "dst" at the location specified by "imm8".</description>
+ <operation>
+dst[255:0] := a[255:0]
+CASE imm8[0] OF
+0: dst[127:0] := b[127:0]
+1: dst[255:128] := b[127:0]
+ESAC
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VINSERTI64X2" form="ymm, ymm, xmm, imm8" xed="VINSERTI64X2_YMMu64_MASKmskw_YMMu64_XMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_inserti64x2">
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m256i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="__m128i" varname="b" etype="UI64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="1"/>
+ <description>Copy "a" to "tmp", then insert 128 bits (composed of 2 packed 64-bit integers) from "b" into "tmp" at the location specified by "imm8". Store "tmp" to "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+tmp[255:0] := a[255:0]
+CASE (imm8[0]) OF
+0: tmp[127:0] := b[127:0]
+1: tmp[255:128] := b[127:0]
+ESAC
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := tmp[i+63:i]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VINSERTI64X2" form="ymm {k}, ymm, xmm, imm8" xed="VINSERTI64X2_YMMu64_MASKmskw_YMMu64_XMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_inserti64x2">
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="__m128i" varname="b" etype="UI64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="1"/>
+ <description>Copy "a" to "tmp", then insert 128 bits (composed of 2 packed 64-bit integers) from "b" into "tmp" at the location specified by "imm8". Store "tmp" to "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+tmp[255:0] := a[255:0]
+CASE (imm8[0]) OF
+0: tmp[127:0] := b[127:0]
+1: tmp[255:128] := b[127:0]
+ESAC
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := tmp[i+63:i]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VINSERTI64X2" form="ymm {z}, ymm, xmm, imm8" xed="VINSERTI64X2_YMMu64_MASKmskw_YMMu64_XMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_inserti64x2">
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="__m128i" varname="b" etype="UI64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="2"/>
+ <description>Copy "a" to "dst", then insert 128 bits (composed of 2 packed 64-bit integers) from "b" into "dst" at the location specified by "imm8".</description>
+ <operation>
+dst[511:0] := a[511:0]
+CASE imm8[1:0] OF
+0: dst[127:0] := b[127:0]
+1: dst[255:128] := b[127:0]
+2: dst[383:256] := b[127:0]
+3: dst[511:384] := b[127:0]
+ESAC
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VINSERTI64X2" form="zmm, zmm, xmm, imm8" xed="VINSERTI64X2_ZMMu64_MASKmskw_ZMMu64_XMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_inserti64x2">
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="__m128i" varname="b" etype="UI64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="2"/>
+ <description>Copy "a" to "tmp", then insert 128 bits (composed of 2 packed 64-bit integers) from "b" into "tmp" at the location specified by "imm8". Store "tmp" to "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+tmp[511:0] := a[511:0]
+CASE (imm8[1:0]) OF
+0: tmp[127:0] := b[127:0]
+1: tmp[255:128] := b[127:0]
+2: tmp[383:256] := b[127:0]
+3: tmp[511:384] := b[127:0]
+ESAC
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := tmp[i+63:i]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VINSERTI64X2" form="zmm {k}, zmm, xmm, imm8" xed="VINSERTI64X2_ZMMu64_MASKmskw_ZMMu64_XMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_inserti64x2">
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="__m128i" varname="b" etype="UI64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="2"/>
+ <description>Copy "a" to "tmp", then insert 128 bits (composed of 2 packed 64-bit integers) from "b" into "tmp" at the location specified by "imm8". Store "tmp" to "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+tmp[511:0] := a[511:0]
+CASE (imm8[1:0]) OF
+0: tmp[127:0] := b[127:0]
+1: tmp[255:128] := b[127:0]
+2: tmp[383:256] := b[127:0]
+3: tmp[511:384] := b[127:0]
+ESAC
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := tmp[i+63:i]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VINSERTI64X2" form="zmm {z}, zmm, xmm, imm8" xed="VINSERTI64X2_ZMMu64_MASKmskw_ZMMu64_XMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_or_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Logical</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <parameter type="__m256d" varname="b" etype="FP64"/>
+ <description>Compute the bitwise OR of packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := a[i+63:i] OR b[i+63:i]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VORPD" form="ymm {k}, ymm, ymm" xed="VORPD_YMMu64_MASKmskw_YMMu64_YMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_or_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Logical</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <parameter type="__m256d" varname="b" etype="FP64"/>
+ <description>Compute the bitwise OR of packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := a[i+63:i] OR b[i+63:i]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VORPD" form="ymm {z}, ymm, ymm" xed="VORPD_YMMu64_MASKmskw_YMMu64_YMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_or_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Logical</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <description>Compute the bitwise OR of packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := a[i+63:i] OR b[i+63:i]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VORPD" form="zmm {k}, zmm, zmm" xed="VORPD_ZMMu64_MASKmskw_ZMMu64_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_or_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Logical</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <description>Compute the bitwise OR of packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := a[i+63:i] OR b[i+63:i]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VORPD" form="zmm {z}, zmm, zmm" xed="VORPD_ZMMu64_MASKmskw_ZMMu64_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_or_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Logical</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <description>Compute the bitwise OR of packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ dst[i+63:i] := a[i+63:i] OR b[i+63:i]
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VORPD" form="zmm, zmm, zmm" xed="VORPD_ZMMu64_MASKmskw_ZMMu64_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_or_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Logical</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Compute the bitwise OR of packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := a[i+63:i] OR b[i+63:i]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VORPD" form="xmm {k}, xmm, xmm" xed="VORPD_XMMu64_MASKmskw_XMMu64_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_or_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Logical</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Compute the bitwise OR of packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := a[i+63:i] OR b[i+63:i]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VORPD" form="xmm {z}, xmm, xmm" xed="VORPD_XMMu64_MASKmskw_XMMu64_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_or_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Logical</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="__m256" varname="b" etype="FP32"/>
+ <description>Compute the bitwise OR of packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := a[i+31:i] OR b[i+31:i]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VORPS" form="ymm {k}, ymm, ymm" xed="VORPS_YMMu32_MASKmskw_YMMu32_YMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_or_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Logical</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="__m256" varname="b" etype="FP32"/>
+ <description>Compute the bitwise OR of packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := a[i+31:i] OR b[i+31:i]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VORPS" form="ymm {z}, ymm, ymm" xed="VORPS_YMMu32_MASKmskw_YMMu32_YMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_or_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Logical</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <description>Compute the bitwise OR of packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := a[i+31:i] OR b[i+31:i]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VORPS" form="zmm {k}, zmm, zmm" xed="VORPS_ZMMu32_MASKmskw_ZMMu32_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_or_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Logical</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <description>Compute the bitwise OR of packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := a[i+31:i] OR b[i+31:i]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VORPS" form="zmm {z}, zmm, zmm" xed="VORPS_ZMMu32_MASKmskw_ZMMu32_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_or_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Logical</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <description>Compute the bitwise OR of packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := a[i+31:i] OR b[i+31:i]
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VORPS" form="zmm, zmm, zmm" xed="VORPS_ZMMu32_MASKmskw_ZMMu32_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_or_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Logical</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Compute the bitwise OR of packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := a[i+31:i] OR b[i+31:i]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VORPS" form="xmm {k}, xmm, xmm" xed="VORPS_XMMu32_MASKmskw_XMMu32_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_or_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Logical</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Compute the bitwise OR of packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := a[i+31:i] OR b[i+31:i]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VORPS" form="xmm {z}, xmm, xmm" xed="VORPS_XMMu32_MASKmskw_XMMu32_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_movepi32_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <description>Set each bit of mask register "k" based on the most significant bit of the corresponding packed 32-bit integer in "a".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF a[i+31]
+ k[j] := 1
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:8] := 0
+ </operation>
+ <instruction name="VPMOVD2M" form="k, ymm" xed="VPMOVD2M_MASKmskw_YMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_movepi32_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <description>Set each bit of mask register "k" based on the most significant bit of the corresponding packed 32-bit integer in "a".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF a[i+31]
+ k[j] := 1
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:16] := 0
+ </operation>
+ <instruction name="VPMOVD2M" form="k, zmm" xed="VPMOVD2M_MASKmskw_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_movepi32_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <description>Set each bit of mask register "k" based on the most significant bit of the corresponding packed 32-bit integer in "a".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF a[i+31]
+ k[j] := 1
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:4] := 0
+ </operation>
+ <instruction name="VPMOVD2M" form="k, xmm" xed="VPMOVD2M_MASKmskw_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_movm_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <description>Set each packed 32-bit integer in "dst" to all ones or all zeros based on the value of the corresponding bit in "k".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := 0xFFFFFFFF
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMOVM2D" form="ymm" xed="VPMOVM2D_YMMu32_MASKmskw_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_movm_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <description>Set each packed 32-bit integer in "dst" to all ones or all zeros based on the value of the corresponding bit in "k".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := 0xFFFFFFFF
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMOVM2D" form="zmm" xed="VPMOVM2D_ZMMu32_MASKmskw_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_movm_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <description>Set each packed 32-bit integer in "dst" to all ones or all zeros based on the value of the corresponding bit in "k".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := 0xFFFFFFFF
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMOVM2D" form="xmm" xed="VPMOVM2D_XMMu32_MASKmskw_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_movm_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <description>Set each packed 64-bit integer in "dst" to all ones or all zeros based on the value of the corresponding bit in "k".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := 0xFFFFFFFFFFFFFFFF
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMOVM2Q" form="ymm" xed="VPMOVM2Q_YMMu64_MASKmskw_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_movm_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <description>Set each packed 64-bit integer in "dst" to all ones or all zeros based on the value of the corresponding bit in "k".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := 0xFFFFFFFFFFFFFFFF
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMOVM2Q" form="zmm" xed="VPMOVM2Q_ZMMu64_MASKmskw_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_movm_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <description>Set each packed 64-bit integer in "dst" to all ones or all zeros based on the value of the corresponding bit in "k".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := 0xFFFFFFFFFFFFFFFF
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMOVM2Q" form="xmm" xed="VPMOVM2Q_XMMu64_MASKmskw_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_movepi64_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <description>Set each bit of mask register "k" based on the most significant bit of the corresponding packed 64-bit integer in "a".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF a[i+63]
+ k[j] := 1
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:4] := 0
+ </operation>
+ <instruction name="VPMOVQ2M" form="k, ymm" xed="VPMOVQ2M_MASKmskw_YMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_movepi64_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <description>Set each bit of mask register "k" based on the most significant bit of the corresponding packed 64-bit integer in "a".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF a[i+63]
+ k[j] := 1
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:8] := 0
+ </operation>
+ <instruction name="VPMOVQ2M" form="k, zmm" xed="VPMOVQ2M_MASKmskw_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_movepi64_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <description>Set each bit of mask register "k" based on the most significant bit of the corresponding packed 64-bit integer in "a".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF a[i+63]
+ k[j] := 1
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:2] := 0
+ </operation>
+ <instruction name="VPMOVQ2M" form="k, xmm" xed="VPMOVQ2M_MASKmskw_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_mullo_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m256i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="__m256i" varname="b" etype="UI64"/>
+ <description>Multiply the packed 64-bit integers in "a" and "b", producing intermediate 128-bit integers, and store the low 64 bits of the intermediate integers in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ tmp[127:0] := a[i+63:i] * b[i+63:i]
+ dst[i+63:i] := tmp[63:0]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMULLQ" form="ymm {k}, ymm, ymm" xed="VPMULLQ_YMMu64_MASKmskw_YMMu64_YMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_mullo_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="__m256i" varname="b" etype="UI64"/>
+ <description>Multiply the packed 64-bit integers in "a" and "b", producing intermediate 128-bit integers, and store the low 64 bits of the intermediate integers in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ tmp[127:0] := a[i+63:i] * b[i+63:i]
+ dst[i+63:i] := tmp[63:0]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMULLQ" form="ymm {z}, ymm, ymm" xed="VPMULLQ_YMMu64_MASKmskw_YMMu64_YMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mullo_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="__m256i" varname="b" etype="UI64"/>
+ <description>Multiply the packed 64-bit integers in "a" and "b", producing intermediate 128-bit integers, and store the low 64 bits of the intermediate integers in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ tmp[127:0] := a[i+63:i] * b[i+63:i]
+ dst[i+63:i] := tmp[63:0]
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMULLQ" form="ymm, ymm, ymm" xed="VPMULLQ_YMMu64_MASKmskw_YMMu64_YMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_mullo_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="__m512i" varname="b" etype="UI64"/>
+ <description>Multiply the packed 64-bit integers in "a" and "b", producing intermediate 128-bit integers, and store the low 64 bits of the intermediate integers in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ tmp[127:0] := a[i+63:i] * b[i+63:i]
+ dst[i+63:i] := tmp[63:0]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMULLQ" form="zmm {k}, zmm, zmm" xed="VPMULLQ_ZMMu64_MASKmskw_ZMMu64_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_mullo_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="__m512i" varname="b" etype="UI64"/>
+ <description>Multiply the packed 64-bit integers in "a" and "b", producing intermediate 128-bit integers, and store the low 64 bits of the intermediate integers in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ tmp[127:0] := a[i+63:i] * b[i+63:i]
+ dst[i+63:i] := tmp[63:0]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMULLQ" form="zmm {z}, zmm, zmm" xed="VPMULLQ_ZMMu64_MASKmskw_ZMMu64_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mullo_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="__m512i" varname="b" etype="UI64"/>
+ <description>Multiply the packed 64-bit integers in "a" and "b", producing intermediate 128-bit integers, and store the low 64 bits of the intermediate integers in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ tmp[127:0] := a[i+63:i] * b[i+63:i]
+ dst[i+63:i] := tmp[63:0]
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMULLQ" form="zmm, zmm, zmm" xed="VPMULLQ_ZMMu64_MASKmskw_ZMMu64_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_mullo_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m128i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <parameter type="__m128i" varname="b" etype="UI64"/>
+ <description>Multiply the packed 64-bit integers in "a" and "b", producing intermediate 128-bit integers, and store the low 64 bits of the intermediate integers in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ tmp[127:0] := a[i+63:i] * b[i+63:i]
+ dst[i+63:i] := tmp[63:0]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMULLQ" form="xmm {k}, xmm, xmm" xed="VPMULLQ_XMMu64_MASKmskw_XMMu64_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_mullo_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <parameter type="__m128i" varname="b" etype="UI64"/>
+ <description>Multiply the packed 64-bit integers in "a" and "b", producing intermediate 128-bit integers, and store the low 64 bits of the intermediate integers in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ tmp[127:0] := a[i+63:i] * b[i+63:i]
+ dst[i+63:i] := tmp[63:0]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMULLQ" form="xmm {z}, xmm, xmm" xed="VPMULLQ_XMMu64_MASKmskw_XMMu64_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mullo_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <parameter type="__m128i" varname="b" etype="UI64"/>
+ <description>Multiply the packed 64-bit integers in "a" and "b", producing intermediate 128-bit integers, and store the low 64 bits of the intermediate integers in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ tmp[127:0] := a[i+63:i] * b[i+63:i]
+ dst[i+63:i] := tmp[63:0]
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMULLQ" form="xmm, xmm, xmm" xed="VPMULLQ_XMMu64_MASKmskw_XMMu64_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_range_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <parameter type="__m256d" varname="b" etype="FP64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="4"/>
+ <description>Calculate the max, min, absolute max, or absolute min (depending on control in "imm8") for packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+	imm8[1:0] specifies the operation control: 00 = min, 01 = max, 10 = absolute min, 11 = absolute max.
+ imm8[3:2] specifies the sign control: 00 = sign from a, 01 = sign from compare result, 10 = clear sign bit, 11 = set sign bit.</description>
+ <operation>
+DEFINE RANGE(src1[63:0], src2[63:0], opCtl[1:0], signSelCtl[1:0]) {
+ CASE opCtl[1:0] OF
+ 0: tmp[63:0] := (src1[63:0] &lt;= src2[63:0]) ? src1[63:0] : src2[63:0]
+ 1: tmp[63:0] := (src1[63:0] &lt;= src2[63:0]) ? src2[63:0] : src1[63:0]
+ 2: tmp[63:0] := (ABS(src1[63:0]) &lt;= ABS(src2[63:0])) ? src1[63:0] : src2[63:0]
+ 3: tmp[63:0] := (ABS(src1[63:0]) &lt;= ABS(src2[63:0])) ? src2[63:0] : src1[63:0]
+ ESAC
+
+ CASE signSelCtl[1:0] OF
+ 0: dst[63:0] := (src1[63] &lt;&lt; 63) OR (tmp[62:0])
+ 1: dst[63:0] := tmp[63:0]
+ 2: dst[63:0] := (0 &lt;&lt; 63) OR (tmp[62:0])
+ 3: dst[63:0] := (1 &lt;&lt; 63) OR (tmp[62:0])
+ ESAC
+
+ RETURN dst
+}
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := RANGE(a[i+63:i], b[i+63:i], imm8[1:0], imm8[3:2])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VRANGEPD" form="ymm {k}, ymm, ymm, imm8" xed="VRANGEPD_YMMf64_MASKmskw_YMMf64_YMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_range_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <parameter type="__m256d" varname="b" etype="FP64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="4"/>
+ <description>Calculate the max, min, absolute max, or absolute min (depending on control in "imm8") for packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+	imm8[1:0] specifies the operation control: 00 = min, 01 = max, 10 = absolute min, 11 = absolute max.
+ imm8[3:2] specifies the sign control: 00 = sign from a, 01 = sign from compare result, 10 = clear sign bit, 11 = set sign bit.</description>
+ <operation>
+DEFINE RANGE(src1[63:0], src2[63:0], opCtl[1:0], signSelCtl[1:0]) {
+ CASE opCtl[1:0] OF
+ 0: tmp[63:0] := (src1[63:0] &lt;= src2[63:0]) ? src1[63:0] : src2[63:0]
+ 1: tmp[63:0] := (src1[63:0] &lt;= src2[63:0]) ? src2[63:0] : src1[63:0]
+ 2: tmp[63:0] := (ABS(src1[63:0]) &lt;= ABS(src2[63:0])) ? src1[63:0] : src2[63:0]
+ 3: tmp[63:0] := (ABS(src1[63:0]) &lt;= ABS(src2[63:0])) ? src2[63:0] : src1[63:0]
+ ESAC
+
+ CASE signSelCtl[1:0] OF
+ 0: dst[63:0] := (src1[63] &lt;&lt; 63) OR (tmp[62:0])
+ 1: dst[63:0] := tmp[63:0]
+ 2: dst[63:0] := (0 &lt;&lt; 63) OR (tmp[62:0])
+ 3: dst[63:0] := (1 &lt;&lt; 63) OR (tmp[62:0])
+ ESAC
+
+ RETURN dst
+}
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := RANGE(a[i+63:i], b[i+63:i], imm8[1:0], imm8[3:2])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VRANGEPD" form="ymm {z}, ymm, ymm, imm8" xed="VRANGEPD_YMMf64_MASKmskw_YMMf64_YMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_range_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <parameter type="__m256d" varname="b" etype="FP64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="4"/>
+ <description>Calculate the max, min, absolute max, or absolute min (depending on control in "imm8") for packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst".
+	imm8[1:0] specifies the operation control: 00 = min, 01 = max, 10 = absolute min, 11 = absolute max.
+ imm8[3:2] specifies the sign control: 00 = sign from a, 01 = sign from compare result, 10 = clear sign bit, 11 = set sign bit.</description>
+ <operation>
+DEFINE RANGE(src1[63:0], src2[63:0], opCtl[1:0], signSelCtl[1:0]) {
+ CASE opCtl[1:0] OF
+ 0: tmp[63:0] := (src1[63:0] &lt;= src2[63:0]) ? src1[63:0] : src2[63:0]
+ 1: tmp[63:0] := (src1[63:0] &lt;= src2[63:0]) ? src2[63:0] : src1[63:0]
+ 2: tmp[63:0] := (ABS(src1[63:0]) &lt;= ABS(src2[63:0])) ? src1[63:0] : src2[63:0]
+ 3: tmp[63:0] := (ABS(src1[63:0]) &lt;= ABS(src2[63:0])) ? src2[63:0] : src1[63:0]
+ ESAC
+
+ CASE signSelCtl[1:0] OF
+ 0: dst[63:0] := (src1[63] &lt;&lt; 63) OR (tmp[62:0])
+ 1: dst[63:0] := tmp[63:0]
+ 2: dst[63:0] := (0 &lt;&lt; 63) OR (tmp[62:0])
+ 3: dst[63:0] := (1 &lt;&lt; 63) OR (tmp[62:0])
+ ESAC
+
+ RETURN dst
+}
+FOR j := 0 to 3
+ i := j*64
+ dst[i+63:i] := RANGE(a[i+63:i], b[i+63:i], imm8[1:0], imm8[3:2])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VRANGEPD" form="ymm, ymm, ymm, imm8" xed="VRANGEPD_YMMf64_MASKmskw_YMMf64_YMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_range_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="4"/>
+ <description>Calculate the max, min, absolute max, or absolute min (depending on control in "imm8") for packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+	imm8[1:0] specifies the operation control: 00 = min, 01 = max, 10 = absolute min, 11 = absolute max.
+ imm8[3:2] specifies the sign control: 00 = sign from a, 01 = sign from compare result, 10 = clear sign bit, 11 = set sign bit.</description>
+ <operation>
+DEFINE RANGE(src1[63:0], src2[63:0], opCtl[1:0], signSelCtl[1:0]) {
+ CASE opCtl[1:0] OF
+ 0: tmp[63:0] := (src1[63:0] &lt;= src2[63:0]) ? src1[63:0] : src2[63:0]
+ 1: tmp[63:0] := (src1[63:0] &lt;= src2[63:0]) ? src2[63:0] : src1[63:0]
+ 2: tmp[63:0] := (ABS(src1[63:0]) &lt;= ABS(src2[63:0])) ? src1[63:0] : src2[63:0]
+ 3: tmp[63:0] := (ABS(src1[63:0]) &lt;= ABS(src2[63:0])) ? src2[63:0] : src1[63:0]
+ ESAC
+
+ CASE signSelCtl[1:0] OF
+ 0: dst[63:0] := (src1[63] &lt;&lt; 63) OR (tmp[62:0])
+ 1: dst[63:0] := tmp[63:0]
+ 2: dst[63:0] := (0 &lt;&lt; 63) OR (tmp[62:0])
+ 3: dst[63:0] := (1 &lt;&lt; 63) OR (tmp[62:0])
+ ESAC
+
+ RETURN dst
+}
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := RANGE(a[i+63:i], b[i+63:i], imm8[1:0], imm8[3:2])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VRANGEPD" form="zmm {k}, zmm, zmm, imm8" xed="VRANGEPD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_range_round_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="4"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Calculate the max, min, absolute max, or absolute min (depending on control in "imm8") for packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+	imm8[1:0] specifies the operation control: 00 = min, 01 = max, 10 = absolute min, 11 = absolute max.
+ imm8[3:2] specifies the sign control: 00 = sign from a, 01 = sign from compare result, 10 = clear sign bit, 11 = set sign bit. [sae_note]</description>
+ <operation>
+DEFINE RANGE(src1[63:0], src2[63:0], opCtl[1:0], signSelCtl[1:0]) {
+ CASE opCtl[1:0] OF
+ 0: tmp[63:0] := (src1[63:0] &lt;= src2[63:0]) ? src1[63:0] : src2[63:0]
+ 1: tmp[63:0] := (src1[63:0] &lt;= src2[63:0]) ? src2[63:0] : src1[63:0]
+ 2: tmp[63:0] := (ABS(src1[63:0]) &lt;= ABS(src2[63:0])) ? src1[63:0] : src2[63:0]
+ 3: tmp[63:0] := (ABS(src1[63:0]) &lt;= ABS(src2[63:0])) ? src2[63:0] : src1[63:0]
+ ESAC
+
+ CASE signSelCtl[1:0] OF
+ 0: dst[63:0] := (src1[63] &lt;&lt; 63) OR (tmp[62:0])
+ 1: dst[63:0] := tmp[63:0]
+ 2: dst[63:0] := (0 &lt;&lt; 63) OR (tmp[62:0])
+ 3: dst[63:0] := (1 &lt;&lt; 63) OR (tmp[62:0])
+ ESAC
+
+ RETURN dst
+}
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := RANGE(a[i+63:i], b[i+63:i], imm8[1:0], imm8[3:2])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VRANGEPD" form="zmm {k}, zmm, zmm {sae}, imm8" xed="VRANGEPD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_range_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="4"/>
+ <description>Calculate the max, min, absolute max, or absolute min (depending on control in "imm8") for packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+	imm8[1:0] specifies the operation control: 00 = min, 01 = max, 10 = absolute min, 11 = absolute max.
+ imm8[3:2] specifies the sign control: 00 = sign from a, 01 = sign from compare result, 10 = clear sign bit, 11 = set sign bit.</description>
+ <operation>
+DEFINE RANGE(src1[63:0], src2[63:0], opCtl[1:0], signSelCtl[1:0]) {
+ CASE opCtl[1:0] OF
+ 0: tmp[63:0] := (src1[63:0] &lt;= src2[63:0]) ? src1[63:0] : src2[63:0]
+ 1: tmp[63:0] := (src1[63:0] &lt;= src2[63:0]) ? src2[63:0] : src1[63:0]
+ 2: tmp[63:0] := (ABS(src1[63:0]) &lt;= ABS(src2[63:0])) ? src1[63:0] : src2[63:0]
+ 3: tmp[63:0] := (ABS(src1[63:0]) &lt;= ABS(src2[63:0])) ? src2[63:0] : src1[63:0]
+ ESAC
+
+ CASE signSelCtl[1:0] OF
+ 0: dst[63:0] := (src1[63] &lt;&lt; 63) OR (tmp[62:0])
+ 1: dst[63:0] := tmp[63:0]
+ 2: dst[63:0] := (0 &lt;&lt; 63) OR (tmp[62:0])
+ 3: dst[63:0] := (1 &lt;&lt; 63) OR (tmp[62:0])
+ ESAC
+
+ RETURN dst
+}
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := RANGE(a[i+63:i], b[i+63:i], imm8[1:0], imm8[3:2])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VRANGEPD" form="zmm {z}, zmm, zmm, imm8" xed="VRANGEPD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_range_round_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="4"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Calculate the max, min, absolute max, or absolute min (depending on control in "imm8") for packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+	imm8[1:0] specifies the operation control: 00 = min, 01 = max, 10 = absolute min, 11 = absolute max.
+ imm8[3:2] specifies the sign control: 00 = sign from a, 01 = sign from compare result, 10 = clear sign bit, 11 = set sign bit. [sae_note]</description>
+ <operation>
+DEFINE RANGE(src1[63:0], src2[63:0], opCtl[1:0], signSelCtl[1:0]) {
+ CASE opCtl[1:0] OF
+ 0: tmp[63:0] := (src1[63:0] &lt;= src2[63:0]) ? src1[63:0] : src2[63:0]
+ 1: tmp[63:0] := (src1[63:0] &lt;= src2[63:0]) ? src2[63:0] : src1[63:0]
+ 2: tmp[63:0] := (ABS(src1[63:0]) &lt;= ABS(src2[63:0])) ? src1[63:0] : src2[63:0]
+ 3: tmp[63:0] := (ABS(src1[63:0]) &lt;= ABS(src2[63:0])) ? src2[63:0] : src1[63:0]
+ ESAC
+
+ CASE signSelCtl[1:0] OF
+ 0: dst[63:0] := (src1[63] &lt;&lt; 63) OR (tmp[62:0])
+ 1: dst[63:0] := tmp[63:0]
+ 2: dst[63:0] := (0 &lt;&lt; 63) OR (tmp[62:0])
+ 3: dst[63:0] := (1 &lt;&lt; 63) OR (tmp[62:0])
+ ESAC
+
+ RETURN dst
+}
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := RANGE(a[i+63:i], b[i+63:i], imm8[1:0], imm8[3:2])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VRANGEPD" form="zmm {z}, zmm, zmm {sae}, imm8" xed="VRANGEPD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_range_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="4"/>
+ <description>Calculate the max, min, absolute max, or absolute min (depending on control in "imm8") for packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst".
+ imm8[1:0] specifies the operation control: 00 = min, 01 = max, 10 = absolute max, 11 = absolute min.
+ imm8[3:2] specifies the sign control: 00 = sign from a, 01 = sign from compare result, 10 = clear sign bit, 11 = set sign bit.</description>
+ <operation>
+DEFINE RANGE(src1[63:0], src2[63:0], opCtl[1:0], signSelCtl[1:0]) {
+ CASE opCtl[1:0] OF
+ 0: tmp[63:0] := (src1[63:0] &lt;= src2[63:0]) ? src1[63:0] : src2[63:0]
+ 1: tmp[63:0] := (src1[63:0] &lt;= src2[63:0]) ? src2[63:0] : src1[63:0]
+ 2: tmp[63:0] := (ABS(src1[63:0]) &lt;= ABS(src2[63:0])) ? src1[63:0] : src2[63:0]
+ 3: tmp[63:0] := (ABS(src1[63:0]) &lt;= ABS(src2[63:0])) ? src2[63:0] : src1[63:0]
+ ESAC
+
+ CASE signSelCtl[1:0] OF
+ 0: dst[63:0] := (src1[63] &lt;&lt; 63) OR (tmp[62:0])
+ 1: dst[63:0] := tmp[63:0]
+ 2: dst[63:0] := (0 &lt;&lt; 63) OR (tmp[62:0])
+ 3: dst[63:0] := (1 &lt;&lt; 63) OR (tmp[62:0])
+ ESAC
+
+ RETURN dst
+}
+FOR j := 0 to 7
+ i := j*64
+ dst[i+63:i] := RANGE(a[i+63:i], b[i+63:i], imm8[1:0], imm8[3:2])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VRANGEPD" form="zmm, zmm, zmm, imm8" xed="VRANGEPD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_range_round_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="4"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Calculate the max, min, absolute max, or absolute min (depending on control in "imm8") for packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst".
+ imm8[1:0] specifies the operation control: 00 = min, 01 = max, 10 = absolute max, 11 = absolute min.
+ imm8[3:2] specifies the sign control: 00 = sign from a, 01 = sign from compare result, 10 = clear sign bit, 11 = set sign bit. [sae_note]</description>
+ <operation>
+DEFINE RANGE(src1[63:0], src2[63:0], opCtl[1:0], signSelCtl[1:0]) {
+ CASE opCtl[1:0] OF
+ 0: tmp[63:0] := (src1[63:0] &lt;= src2[63:0]) ? src1[63:0] : src2[63:0]
+ 1: tmp[63:0] := (src1[63:0] &lt;= src2[63:0]) ? src2[63:0] : src1[63:0]
+ 2: tmp[63:0] := (ABS(src1[63:0]) &lt;= ABS(src2[63:0])) ? src1[63:0] : src2[63:0]
+ 3: tmp[63:0] := (ABS(src1[63:0]) &lt;= ABS(src2[63:0])) ? src2[63:0] : src1[63:0]
+ ESAC
+
+ CASE signSelCtl[1:0] OF
+ 0: dst[63:0] := (src1[63] &lt;&lt; 63) OR (tmp[62:0])
+ 1: dst[63:0] := tmp[63:0]
+ 2: dst[63:0] := (0 &lt;&lt; 63) OR (tmp[62:0])
+ 3: dst[63:0] := (1 &lt;&lt; 63) OR (tmp[62:0])
+ ESAC
+
+ RETURN dst
+}
+FOR j := 0 to 7
+ i := j*64
+ dst[i+63:i] := RANGE(a[i+63:i], b[i+63:i], imm8[1:0], imm8[3:2])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VRANGEPD" form="zmm, zmm, zmm {sae}, imm8" xed="VRANGEPD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_range_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="4"/>
+ <description>Calculate the max, min, absolute max, or absolute min (depending on control in "imm8") for packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+ imm8[1:0] specifies the operation control: 00 = min, 01 = max, 10 = absolute max, 11 = absolute min.
+ imm8[3:2] specifies the sign control: 00 = sign from a, 01 = sign from compare result, 10 = clear sign bit, 11 = set sign bit.</description>
+ <operation>
+DEFINE RANGE(src1[63:0], src2[63:0], opCtl[1:0], signSelCtl[1:0]) {
+ CASE opCtl[1:0] OF
+ 0: tmp[63:0] := (src1[63:0] &lt;= src2[63:0]) ? src1[63:0] : src2[63:0]
+ 1: tmp[63:0] := (src1[63:0] &lt;= src2[63:0]) ? src2[63:0] : src1[63:0]
+ 2: tmp[63:0] := (ABS(src1[63:0]) &lt;= ABS(src2[63:0])) ? src1[63:0] : src2[63:0]
+ 3: tmp[63:0] := (ABS(src1[63:0]) &lt;= ABS(src2[63:0])) ? src2[63:0] : src1[63:0]
+ ESAC
+
+ CASE signSelCtl[1:0] OF
+ 0: dst[63:0] := (src1[63] &lt;&lt; 63) OR (tmp[62:0])
+ 1: dst[63:0] := tmp[63:0]
+ 2: dst[63:0] := (0 &lt;&lt; 63) OR (tmp[62:0])
+ 3: dst[63:0] := (1 &lt;&lt; 63) OR (tmp[62:0])
+ ESAC
+
+ RETURN dst
+}
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := RANGE(a[i+63:i], b[i+63:i], imm8[1:0], imm8[3:2])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VRANGEPD" form="xmm {k}, xmm, xmm, imm8" xed="VRANGEPD_XMMf64_MASKmskw_XMMf64_XMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_range_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="4"/>
+ <description>Calculate the max, min, absolute max, or absolute min (depending on control in "imm8") for packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+ imm8[1:0] specifies the operation control: 00 = min, 01 = max, 10 = absolute max, 11 = absolute min.
+ imm8[3:2] specifies the sign control: 00 = sign from a, 01 = sign from compare result, 10 = clear sign bit, 11 = set sign bit.</description>
+ <operation>
+DEFINE RANGE(src1[63:0], src2[63:0], opCtl[1:0], signSelCtl[1:0]) {
+ CASE opCtl[1:0] OF
+ 0: tmp[63:0] := (src1[63:0] &lt;= src2[63:0]) ? src1[63:0] : src2[63:0]
+ 1: tmp[63:0] := (src1[63:0] &lt;= src2[63:0]) ? src2[63:0] : src1[63:0]
+ 2: tmp[63:0] := (ABS(src1[63:0]) &lt;= ABS(src2[63:0])) ? src1[63:0] : src2[63:0]
+ 3: tmp[63:0] := (ABS(src1[63:0]) &lt;= ABS(src2[63:0])) ? src2[63:0] : src1[63:0]
+ ESAC
+
+ CASE signSelCtl[1:0] OF
+ 0: dst[63:0] := (src1[63] &lt;&lt; 63) OR (tmp[62:0])
+ 1: dst[63:0] := tmp[63:0]
+ 2: dst[63:0] := (0 &lt;&lt; 63) OR (tmp[62:0])
+ 3: dst[63:0] := (1 &lt;&lt; 63) OR (tmp[62:0])
+ ESAC
+
+ RETURN dst
+}
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := RANGE(a[i+63:i], b[i+63:i], imm8[1:0], imm8[3:2])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VRANGEPD" form="xmm {z}, xmm, xmm, imm8" xed="VRANGEPD_XMMf64_MASKmskw_XMMf64_XMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_range_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="4"/>
+ <description>Calculate the max, min, absolute max, or absolute min (depending on control in "imm8") for packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst".
+ imm8[1:0] specifies the operation control: 00 = min, 01 = max, 10 = absolute max, 11 = absolute min.
+ imm8[3:2] specifies the sign control: 00 = sign from a, 01 = sign from compare result, 10 = clear sign bit, 11 = set sign bit.</description>
+ <operation>
+DEFINE RANGE(src1[63:0], src2[63:0], opCtl[1:0], signSelCtl[1:0]) {
+ CASE opCtl[1:0] OF
+ 0: tmp[63:0] := (src1[63:0] &lt;= src2[63:0]) ? src1[63:0] : src2[63:0]
+ 1: tmp[63:0] := (src1[63:0] &lt;= src2[63:0]) ? src2[63:0] : src1[63:0]
+ 2: tmp[63:0] := (ABS(src1[63:0]) &lt;= ABS(src2[63:0])) ? src1[63:0] : src2[63:0]
+ 3: tmp[63:0] := (ABS(src1[63:0]) &lt;= ABS(src2[63:0])) ? src2[63:0] : src1[63:0]
+ ESAC
+
+ CASE signSelCtl[1:0] OF
+ 0: dst[63:0] := (src1[63] &lt;&lt; 63) OR (tmp[62:0])
+ 1: dst[63:0] := tmp[63:0]
+ 2: dst[63:0] := (0 &lt;&lt; 63) OR (tmp[62:0])
+ 3: dst[63:0] := (1 &lt;&lt; 63) OR (tmp[62:0])
+ ESAC
+
+ RETURN dst
+}
+FOR j := 0 to 1
+ i := j*64
+ dst[i+63:i] := RANGE(a[i+63:i], b[i+63:i], imm8[1:0], imm8[3:2])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VRANGEPD" form="xmm, xmm, xmm, imm8" xed="VRANGEPD_XMMf64_MASKmskw_XMMf64_XMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_range_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="__m256" varname="b" etype="FP32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="4"/>
+ <description>Calculate the max, min, absolute max, or absolute min (depending on control in "imm8") for packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+ imm8[1:0] specifies the operation control: 00 = min, 01 = max, 10 = absolute max, 11 = absolute min.
+ imm8[3:2] specifies the sign control: 00 = sign from a, 01 = sign from compare result, 10 = clear sign bit, 11 = set sign bit.</description>
+ <operation>
+DEFINE RANGE(src1[31:0], src2[31:0], opCtl[1:0], signSelCtl[1:0]) {
+ CASE opCtl[1:0] OF
+ 0: tmp[31:0] := (src1[31:0] &lt;= src2[31:0]) ? src1[31:0] : src2[31:0]
+ 1: tmp[31:0] := (src1[31:0] &lt;= src2[31:0]) ? src2[31:0] : src1[31:0]
+ 2: tmp[31:0] := (ABS(src1[31:0]) &lt;= ABS(src2[31:0])) ? src1[31:0] : src2[31:0]
+ 3: tmp[31:0] := (ABS(src1[31:0]) &lt;= ABS(src2[31:0])) ? src2[31:0] : src1[31:0]
+ ESAC
+
+ CASE signSelCtl[1:0] OF
+ 0: dst[31:0] := (src1[31] &lt;&lt; 31) OR (tmp[30:0])
+		1: dst[31:0] := tmp[31:0]
+ 2: dst[31:0] := (0 &lt;&lt; 31) OR (tmp[30:0])
+ 3: dst[31:0] := (1 &lt;&lt; 31) OR (tmp[30:0])
+ ESAC
+
+ RETURN dst
+}
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := RANGE(a[i+31:i], b[i+31:i], imm8[1:0], imm8[3:2])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VRANGEPS" form="ymm {k}, ymm, ymm, imm8" xed="VRANGEPS_YMMf32_MASKmskw_YMMf32_YMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_range_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="__m256" varname="b" etype="FP32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="4"/>
+ <description>Calculate the max, min, absolute max, or absolute min (depending on control in "imm8") for packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+ imm8[1:0] specifies the operation control: 00 = min, 01 = max, 10 = absolute max, 11 = absolute min.
+ imm8[3:2] specifies the sign control: 00 = sign from a, 01 = sign from compare result, 10 = clear sign bit, 11 = set sign bit.</description>
+ <operation>
+DEFINE RANGE(src1[31:0], src2[31:0], opCtl[1:0], signSelCtl[1:0]) {
+ CASE opCtl[1:0] OF
+ 0: tmp[31:0] := (src1[31:0] &lt;= src2[31:0]) ? src1[31:0] : src2[31:0]
+ 1: tmp[31:0] := (src1[31:0] &lt;= src2[31:0]) ? src2[31:0] : src1[31:0]
+ 2: tmp[31:0] := (ABS(src1[31:0]) &lt;= ABS(src2[31:0])) ? src1[31:0] : src2[31:0]
+ 3: tmp[31:0] := (ABS(src1[31:0]) &lt;= ABS(src2[31:0])) ? src2[31:0] : src1[31:0]
+ ESAC
+
+ CASE signSelCtl[1:0] OF
+ 0: dst[31:0] := (src1[31] &lt;&lt; 31) OR (tmp[30:0])
+		1: dst[31:0] := tmp[31:0]
+ 2: dst[31:0] := (0 &lt;&lt; 31) OR (tmp[30:0])
+ 3: dst[31:0] := (1 &lt;&lt; 31) OR (tmp[30:0])
+ ESAC
+
+ RETURN dst
+}
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := RANGE(a[i+31:i], b[i+31:i], imm8[1:0], imm8[3:2])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VRANGEPS" form="ymm {z}, ymm, ymm, imm8" xed="VRANGEPS_YMMf32_MASKmskw_YMMf32_YMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_range_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="__m256" varname="b" etype="FP32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="4"/>
+ <description>Calculate the max, min, absolute max, or absolute min (depending on control in "imm8") for packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst".
+ imm8[1:0] specifies the operation control: 00 = min, 01 = max, 10 = absolute max, 11 = absolute min.
+ imm8[3:2] specifies the sign control: 00 = sign from a, 01 = sign from compare result, 10 = clear sign bit, 11 = set sign bit.</description>
+ <operation>
+DEFINE RANGE(src1[31:0], src2[31:0], opCtl[1:0], signSelCtl[1:0]) {
+ CASE opCtl[1:0] OF
+ 0: tmp[31:0] := (src1[31:0] &lt;= src2[31:0]) ? src1[31:0] : src2[31:0]
+ 1: tmp[31:0] := (src1[31:0] &lt;= src2[31:0]) ? src2[31:0] : src1[31:0]
+ 2: tmp[31:0] := (ABS(src1[31:0]) &lt;= ABS(src2[31:0])) ? src1[31:0] : src2[31:0]
+ 3: tmp[31:0] := (ABS(src1[31:0]) &lt;= ABS(src2[31:0])) ? src2[31:0] : src1[31:0]
+ ESAC
+
+ CASE signSelCtl[1:0] OF
+ 0: dst[31:0] := (src1[31] &lt;&lt; 31) OR (tmp[30:0])
+		1: dst[31:0] := tmp[31:0]
+ 2: dst[31:0] := (0 &lt;&lt; 31) OR (tmp[30:0])
+ 3: dst[31:0] := (1 &lt;&lt; 31) OR (tmp[30:0])
+ ESAC
+
+ RETURN dst
+}
+FOR j := 0 to 7
+ i := j*32
+ dst[i+31:i] := RANGE(a[i+31:i], b[i+31:i], imm8[1:0], imm8[3:2])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VRANGEPS" form="ymm, ymm, ymm, imm8" xed="VRANGEPS_YMMf32_MASKmskw_YMMf32_YMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_range_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="4"/>
+ <description>Calculate the max, min, absolute max, or absolute min (depending on control in "imm8") for packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+ imm8[1:0] specifies the operation control: 00 = min, 01 = max, 10 = absolute max, 11 = absolute min.
+ imm8[3:2] specifies the sign control: 00 = sign from a, 01 = sign from compare result, 10 = clear sign bit, 11 = set sign bit.</description>
+ <operation>
+DEFINE RANGE(src1[31:0], src2[31:0], opCtl[1:0], signSelCtl[1:0]) {
+ CASE opCtl[1:0] OF
+ 0: tmp[31:0] := (src1[31:0] &lt;= src2[31:0]) ? src1[31:0] : src2[31:0]
+ 1: tmp[31:0] := (src1[31:0] &lt;= src2[31:0]) ? src2[31:0] : src1[31:0]
+ 2: tmp[31:0] := (ABS(src1[31:0]) &lt;= ABS(src2[31:0])) ? src1[31:0] : src2[31:0]
+ 3: tmp[31:0] := (ABS(src1[31:0]) &lt;= ABS(src2[31:0])) ? src2[31:0] : src1[31:0]
+ ESAC
+
+ CASE signSelCtl[1:0] OF
+ 0: dst[31:0] := (src1[31] &lt;&lt; 31) OR (tmp[30:0])
+		1: dst[31:0] := tmp[31:0]
+ 2: dst[31:0] := (0 &lt;&lt; 31) OR (tmp[30:0])
+ 3: dst[31:0] := (1 &lt;&lt; 31) OR (tmp[30:0])
+ ESAC
+
+ RETURN dst
+}
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := RANGE(a[i+31:i], b[i+31:i], imm8[1:0], imm8[3:2])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VRANGEPS" form="zmm {k}, zmm, zmm, imm8" xed="VRANGEPS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_range_round_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="4"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Calculate the max, min, absolute max, or absolute min (depending on control in "imm8") for packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+ imm8[1:0] specifies the operation control: 00 = min, 01 = max, 10 = absolute max, 11 = absolute min.
+ imm8[3:2] specifies the sign control: 00 = sign from a, 01 = sign from compare result, 10 = clear sign bit, 11 = set sign bit. [sae_note]</description>
+ <operation>
+DEFINE RANGE(src1[31:0], src2[31:0], opCtl[1:0], signSelCtl[1:0]) {
+ CASE opCtl[1:0] OF
+ 0: tmp[31:0] := (src1[31:0] &lt;= src2[31:0]) ? src1[31:0] : src2[31:0]
+ 1: tmp[31:0] := (src1[31:0] &lt;= src2[31:0]) ? src2[31:0] : src1[31:0]
+ 2: tmp[31:0] := (ABS(src1[31:0]) &lt;= ABS(src2[31:0])) ? src1[31:0] : src2[31:0]
+ 3: tmp[31:0] := (ABS(src1[31:0]) &lt;= ABS(src2[31:0])) ? src2[31:0] : src1[31:0]
+ ESAC
+
+ CASE signSelCtl[1:0] OF
+ 0: dst[31:0] := (src1[31] &lt;&lt; 31) OR (tmp[30:0])
+		1: dst[31:0] := tmp[31:0]
+ 2: dst[31:0] := (0 &lt;&lt; 31) OR (tmp[30:0])
+ 3: dst[31:0] := (1 &lt;&lt; 31) OR (tmp[30:0])
+ ESAC
+
+ RETURN dst
+}
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := RANGE(a[i+31:i], b[i+31:i], imm8[1:0], imm8[3:2])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VRANGEPS" form="zmm {k}, zmm, zmm {sae}, imm8" xed="VRANGEPS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_range_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="4"/>
+ <description>Calculate the max, min, absolute max, or absolute min (depending on control in "imm8") for packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+ imm8[1:0] specifies the operation control: 00 = min, 01 = max, 10 = absolute max, 11 = absolute min.
+ imm8[3:2] specifies the sign control: 00 = sign from a, 01 = sign from compare result, 10 = clear sign bit, 11 = set sign bit.</description>
+ <operation>
+DEFINE RANGE(src1[31:0], src2[31:0], opCtl[1:0], signSelCtl[1:0]) {
+ CASE opCtl[1:0] OF
+ 0: tmp[31:0] := (src1[31:0] &lt;= src2[31:0]) ? src1[31:0] : src2[31:0]
+ 1: tmp[31:0] := (src1[31:0] &lt;= src2[31:0]) ? src2[31:0] : src1[31:0]
+ 2: tmp[31:0] := (ABS(src1[31:0]) &lt;= ABS(src2[31:0])) ? src1[31:0] : src2[31:0]
+ 3: tmp[31:0] := (ABS(src1[31:0]) &lt;= ABS(src2[31:0])) ? src2[31:0] : src1[31:0]
+ ESAC
+
+ CASE signSelCtl[1:0] OF
+ 0: dst[31:0] := (src1[31] &lt;&lt; 31) OR (tmp[30:0])
+		1: dst[31:0] := tmp[31:0]
+ 2: dst[31:0] := (0 &lt;&lt; 31) OR (tmp[30:0])
+ 3: dst[31:0] := (1 &lt;&lt; 31) OR (tmp[30:0])
+ ESAC
+
+ RETURN dst
+}
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := RANGE(a[i+31:i], b[i+31:i], imm8[1:0], imm8[3:2])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VRANGEPS" form="zmm {z}, zmm, zmm, imm8" xed="VRANGEPS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_range_round_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="4"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Calculate the max, min, absolute max, or absolute min (depending on control in "imm8") for packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+ imm8[1:0] specifies the operation control: 00 = min, 01 = max, 10 = absolute max, 11 = absolute min.
+ imm8[3:2] specifies the sign control: 00 = sign from a, 01 = sign from compare result, 10 = clear sign bit, 11 = set sign bit. [sae_note]</description>
+ <operation>
+DEFINE RANGE(src1[31:0], src2[31:0], opCtl[1:0], signSelCtl[1:0]) {
+ CASE opCtl[1:0] OF
+ 0: tmp[31:0] := (src1[31:0] &lt;= src2[31:0]) ? src1[31:0] : src2[31:0]
+ 1: tmp[31:0] := (src1[31:0] &lt;= src2[31:0]) ? src2[31:0] : src1[31:0]
+ 2: tmp[31:0] := (ABS(src1[31:0]) &lt;= ABS(src2[31:0])) ? src1[31:0] : src2[31:0]
+ 3: tmp[31:0] := (ABS(src1[31:0]) &lt;= ABS(src2[31:0])) ? src2[31:0] : src1[31:0]
+ ESAC
+
+ CASE signSelCtl[1:0] OF
+ 0: dst[31:0] := (src1[31] &lt;&lt; 31) OR (tmp[30:0])
+		1: dst[31:0] := tmp[31:0]
+ 2: dst[31:0] := (0 &lt;&lt; 31) OR (tmp[30:0])
+ 3: dst[31:0] := (1 &lt;&lt; 31) OR (tmp[30:0])
+ ESAC
+
+ RETURN dst
+}
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := RANGE(a[i+31:i], b[i+31:i], imm8[1:0], imm8[3:2])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VRANGEPS" form="zmm {z}, zmm, zmm {sae}, imm8" xed="VRANGEPS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_range_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="4"/>
+ <description>Calculate the max, min, absolute max, or absolute min (depending on control in "imm8") for packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst".
+ imm8[1:0] specifies the operation control: 00 = min, 01 = max, 10 = absolute max, 11 = absolute min.
+ imm8[3:2] specifies the sign control: 00 = sign from a, 01 = sign from compare result, 10 = clear sign bit, 11 = set sign bit.</description>
+ <operation>
+DEFINE RANGE(src1[31:0], src2[31:0], opCtl[1:0], signSelCtl[1:0]) {
+ CASE opCtl[1:0] OF
+ 0: tmp[31:0] := (src1[31:0] &lt;= src2[31:0]) ? src1[31:0] : src2[31:0]
+ 1: tmp[31:0] := (src1[31:0] &lt;= src2[31:0]) ? src2[31:0] : src1[31:0]
+ 2: tmp[31:0] := (ABS(src1[31:0]) &lt;= ABS(src2[31:0])) ? src1[31:0] : src2[31:0]
+ 3: tmp[31:0] := (ABS(src1[31:0]) &lt;= ABS(src2[31:0])) ? src2[31:0] : src1[31:0]
+ ESAC
+
+ CASE signSelCtl[1:0] OF
+ 0: dst[31:0] := (src1[31] &lt;&lt; 31) OR (tmp[30:0])
+		1: dst[31:0] := tmp[31:0]
+ 2: dst[31:0] := (0 &lt;&lt; 31) OR (tmp[30:0])
+ 3: dst[31:0] := (1 &lt;&lt; 31) OR (tmp[30:0])
+ ESAC
+
+ RETURN dst
+}
+FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := RANGE(a[i+31:i], b[i+31:i], imm8[1:0], imm8[3:2])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VRANGEPS" form="zmm, zmm, zmm, imm8" xed="VRANGEPS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_range_round_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="4"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Calculate the max, min, absolute max, or absolute min (depending on control in "imm8") for packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst".
+ imm8[1:0] specifies the operation control: 00 = min, 01 = max, 10 = absolute max, 11 = absolute min.
+ imm8[3:2] specifies the sign control: 00 = sign from a, 01 = sign from compare result, 10 = clear sign bit, 11 = set sign bit. [sae_note]</description>
+ <operation>
+DEFINE RANGE(src1[31:0], src2[31:0], opCtl[1:0], signSelCtl[1:0]) {
+ CASE opCtl[1:0] OF
+ 0: tmp[31:0] := (src1[31:0] &lt;= src2[31:0]) ? src1[31:0] : src2[31:0]
+ 1: tmp[31:0] := (src1[31:0] &lt;= src2[31:0]) ? src2[31:0] : src1[31:0]
+ 2: tmp[31:0] := (ABS(src1[31:0]) &lt;= ABS(src2[31:0])) ? src1[31:0] : src2[31:0]
+ 3: tmp[31:0] := (ABS(src1[31:0]) &lt;= ABS(src2[31:0])) ? src2[31:0] : src1[31:0]
+ ESAC
+
+ CASE signSelCtl[1:0] OF
+ 0: dst[31:0] := (src1[31] &lt;&lt; 31) OR (tmp[30:0])
+		1: dst[31:0] := tmp[31:0]
+ 2: dst[31:0] := (0 &lt;&lt; 31) OR (tmp[30:0])
+ 3: dst[31:0] := (1 &lt;&lt; 31) OR (tmp[30:0])
+ ESAC
+
+ RETURN dst
+}
+FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := RANGE(a[i+31:i], b[i+31:i], imm8[1:0], imm8[3:2])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VRANGEPS" form="zmm, zmm, zmm {sae}, imm8" xed="VRANGEPS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_range_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="4"/>
+ <description>Calculate the max, min, absolute max, or absolute min (depending on control in "imm8") for packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+ imm8[1:0] specifies the operation control: 00 = min, 01 = max, 10 = absolute max, 11 = absolute min.
+ imm8[3:2] specifies the sign control: 00 = sign from a, 01 = sign from compare result, 10 = clear sign bit, 11 = set sign bit.</description>
+ <operation>
+DEFINE RANGE(src1[31:0], src2[31:0], opCtl[1:0], signSelCtl[1:0]) {
+ CASE opCtl[1:0] OF
+ 0: tmp[31:0] := (src1[31:0] &lt;= src2[31:0]) ? src1[31:0] : src2[31:0]
+ 1: tmp[31:0] := (src1[31:0] &lt;= src2[31:0]) ? src2[31:0] : src1[31:0]
+ 2: tmp[31:0] := (ABS(src1[31:0]) &lt;= ABS(src2[31:0])) ? src1[31:0] : src2[31:0]
+ 3: tmp[31:0] := (ABS(src1[31:0]) &lt;= ABS(src2[31:0])) ? src2[31:0] : src1[31:0]
+ ESAC
+
+ CASE signSelCtl[1:0] OF
+ 0: dst[31:0] := (src1[31] &lt;&lt; 31) OR (tmp[30:0])
+		1: dst[31:0] := tmp[31:0]
+ 2: dst[31:0] := (0 &lt;&lt; 31) OR (tmp[30:0])
+ 3: dst[31:0] := (1 &lt;&lt; 31) OR (tmp[30:0])
+ ESAC
+
+ RETURN dst
+}
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := RANGE(a[i+31:i], b[i+31:i], imm8[1:0], imm8[3:2])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VRANGEPS" form="xmm {k}, xmm, xmm, imm8" xed="VRANGEPS_XMMf32_MASKmskw_XMMf32_XMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_range_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="4"/>
+ <description>Calculate the max, min, absolute max, or absolute min (depending on control in "imm8") for packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+ imm8[1:0] specifies the operation control: 00 = min, 01 = max, 10 = absolute max, 11 = absolute min.
+ imm8[3:2] specifies the sign control: 00 = sign from a, 01 = sign from compare result, 10 = clear sign bit, 11 = set sign bit.</description>
+ <operation>
+DEFINE RANGE(src1[31:0], src2[31:0], opCtl[1:0], signSelCtl[1:0]) {
+ CASE opCtl[1:0] OF
+ 0: tmp[31:0] := (src1[31:0] &lt;= src2[31:0]) ? src1[31:0] : src2[31:0]
+ 1: tmp[31:0] := (src1[31:0] &lt;= src2[31:0]) ? src2[31:0] : src1[31:0]
+ 2: tmp[31:0] := (ABS(src1[31:0]) &lt;= ABS(src2[31:0])) ? src1[31:0] : src2[31:0]
+ 3: tmp[31:0] := (ABS(src1[31:0]) &lt;= ABS(src2[31:0])) ? src2[31:0] : src1[31:0]
+ ESAC
+
+ CASE signSelCtl[1:0] OF
+ 0: dst[31:0] := (src1[31] &lt;&lt; 31) OR (tmp[30:0])
+		1: dst[31:0] := tmp[31:0]
+ 2: dst[31:0] := (0 &lt;&lt; 31) OR (tmp[30:0])
+ 3: dst[31:0] := (1 &lt;&lt; 31) OR (tmp[30:0])
+ ESAC
+
+ RETURN dst
+}
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := RANGE(a[i+31:i], b[i+31:i], imm8[1:0], imm8[3:2])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VRANGEPS" form="xmm {z}, xmm, xmm, imm8" xed="VRANGEPS_XMMf32_MASKmskw_XMMf32_XMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_range_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="4"/>
+ <description>Calculate the max, min, absolute max, or absolute min (depending on control in "imm8") for packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst".
+ imm8[1:0] specifies the operation control: 00 = min, 01 = max, 10 = absolute max, 11 = absolute min.
+ imm8[3:2] specifies the sign control: 00 = sign from a, 01 = sign from compare result, 10 = clear sign bit, 11 = set sign bit.</description>
+ <operation>
+DEFINE RANGE(src1[31:0], src2[31:0], opCtl[1:0], signSelCtl[1:0]) {
+ CASE opCtl[1:0] OF
+ 0: tmp[31:0] := (src1[31:0] &lt;= src2[31:0]) ? src1[31:0] : src2[31:0]
+ 1: tmp[31:0] := (src1[31:0] &lt;= src2[31:0]) ? src2[31:0] : src1[31:0]
+ 2: tmp[31:0] := (ABS(src1[31:0]) &lt;= ABS(src2[31:0])) ? src1[31:0] : src2[31:0]
+ 3: tmp[31:0] := (ABS(src1[31:0]) &lt;= ABS(src2[31:0])) ? src2[31:0] : src1[31:0]
+ ESAC
+
+ CASE signSelCtl[1:0] OF
+ 0: dst[31:0] := (src1[31] &lt;&lt; 31) OR (tmp[30:0])
+ 1: dst[31:0] := tmp[31:0]
+ 2: dst[31:0] := (0 &lt;&lt; 31) OR (tmp[30:0])
+ 3: dst[31:0] := (1 &lt;&lt; 31) OR (tmp[30:0])
+ ESAC
+
+ RETURN dst
+}
+FOR j := 0 to 3
+ i := j*32
+ dst[i+31:i] := RANGE(a[i+31:i], b[i+31:i], imm8[1:0], imm8[3:2])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VRANGEPS" form="xmm, xmm, xmm, imm8" xed="VRANGEPS_XMMf32_MASKmskw_XMMf32_XMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_range_round_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="4"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Calculate the max, min, absolute max, or absolute min (depending on control in "imm8") for the lower double-precision (64-bit) floating-point element in "a" and "b", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst".
+ imm8[1:0] specifies the operation control: 00 = min, 01 = max, 10 = absolute max, 11 = absolute min.
+ imm8[3:2] specifies the sign control: 00 = sign from a, 01 = sign from compare result, 10 = clear sign bit, 11 = set sign bit. [sae_note]</description>
+ <operation>
+DEFINE RANGE(src1[63:0], src2[63:0], opCtl[1:0], signSelCtl[1:0]) {
+ CASE opCtl[1:0] OF
+ 0: tmp[63:0] := (src1[63:0] &lt;= src2[63:0]) ? src1[63:0] : src2[63:0]
+ 1: tmp[63:0] := (src1[63:0] &lt;= src2[63:0]) ? src2[63:0] : src1[63:0]
+ 2: tmp[63:0] := (ABS(src1[63:0]) &lt;= ABS(src2[63:0])) ? src1[63:0] : src2[63:0]
+ 3: tmp[63:0] := (ABS(src1[63:0]) &lt;= ABS(src2[63:0])) ? src2[63:0] : src1[63:0]
+ ESAC
+
+ CASE signSelCtl[1:0] OF
+ 0: dst[63:0] := (src1[63] &lt;&lt; 63) OR (tmp[62:0])
+ 1: dst[63:0] := tmp[63:0]
+ 2: dst[63:0] := (0 &lt;&lt; 63) OR (tmp[62:0])
+ 3: dst[63:0] := (1 &lt;&lt; 63) OR (tmp[62:0])
+ ESAC
+
+ RETURN dst
+}
+IF k[0]
+ dst[63:0] := RANGE(a[63:0], b[63:0], imm8[1:0], imm8[3:2])
+ELSE
+ dst[63:0] := src[63:0]
+FI
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VRANGESD" form="xmm {k}, xmm, xmm {sae}, imm8" xed="VRANGESD_XMMf64_MASKmskw_XMMf64_XMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_range_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="4"/>
+ <description>Calculate the max, min, absolute max, or absolute min (depending on control in "imm8") for the lower double-precision (64-bit) floating-point element in "a" and "b", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst".
+ imm8[1:0] specifies the operation control: 00 = min, 01 = max, 10 = absolute max, 11 = absolute min.
+ imm8[3:2] specifies the sign control: 00 = sign from a, 01 = sign from compare result, 10 = clear sign bit, 11 = set sign bit.</description>
+ <operation>
+DEFINE RANGE(src1[63:0], src2[63:0], opCtl[1:0], signSelCtl[1:0]) {
+ CASE opCtl[1:0] OF
+ 0: tmp[63:0] := (src1[63:0] &lt;= src2[63:0]) ? src1[63:0] : src2[63:0]
+ 1: tmp[63:0] := (src1[63:0] &lt;= src2[63:0]) ? src2[63:0] : src1[63:0]
+ 2: tmp[63:0] := (ABS(src1[63:0]) &lt;= ABS(src2[63:0])) ? src1[63:0] : src2[63:0]
+ 3: tmp[63:0] := (ABS(src1[63:0]) &lt;= ABS(src2[63:0])) ? src2[63:0] : src1[63:0]
+ ESAC
+
+ CASE signSelCtl[1:0] OF
+ 0: dst[63:0] := (src1[63] &lt;&lt; 63) OR (tmp[62:0])
+ 1: dst[63:0] := tmp[63:0]
+ 2: dst[63:0] := (0 &lt;&lt; 63) OR (tmp[62:0])
+ 3: dst[63:0] := (1 &lt;&lt; 63) OR (tmp[62:0])
+ ESAC
+
+ RETURN dst
+}
+IF k[0]
+ dst[63:0] := RANGE(a[63:0], b[63:0], imm8[1:0], imm8[3:2])
+ELSE
+ dst[63:0] := src[63:0]
+FI
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VRANGESD" form="xmm {k}, xmm, xmm, imm8" xed="VRANGESD_XMMf64_MASKmskw_XMMf64_XMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_range_round_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="4"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Calculate the max, min, absolute max, or absolute min (depending on control in "imm8") for the lower double-precision (64-bit) floating-point element in "a" and "b", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst".
+ imm8[1:0] specifies the operation control: 00 = min, 01 = max, 10 = absolute max, 11 = absolute min.
+ imm8[3:2] specifies the sign control: 00 = sign from a, 01 = sign from compare result, 10 = clear sign bit, 11 = set sign bit. [sae_note]</description>
+ <operation>
+DEFINE RANGE(src1[63:0], src2[63:0], opCtl[1:0], signSelCtl[1:0]) {
+ CASE opCtl[1:0] OF
+ 0: tmp[63:0] := (src1[63:0] &lt;= src2[63:0]) ? src1[63:0] : src2[63:0]
+ 1: tmp[63:0] := (src1[63:0] &lt;= src2[63:0]) ? src2[63:0] : src1[63:0]
+ 2: tmp[63:0] := (ABS(src1[63:0]) &lt;= ABS(src2[63:0])) ? src1[63:0] : src2[63:0]
+ 3: tmp[63:0] := (ABS(src1[63:0]) &lt;= ABS(src2[63:0])) ? src2[63:0] : src1[63:0]
+ ESAC
+
+ CASE signSelCtl[1:0] OF
+ 0: dst[63:0] := (src1[63] &lt;&lt; 63) OR (tmp[62:0])
+ 1: dst[63:0] := tmp[63:0]
+ 2: dst[63:0] := (0 &lt;&lt; 63) OR (tmp[62:0])
+ 3: dst[63:0] := (1 &lt;&lt; 63) OR (tmp[62:0])
+ ESAC
+
+ RETURN dst
+}
+IF k[0]
+ dst[63:0] := RANGE(a[63:0], b[63:0], imm8[1:0], imm8[3:2])
+ELSE
+ dst[63:0] := 0
+FI
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VRANGESD" form="xmm {z}, xmm, xmm {sae}, imm8" xed="VRANGESD_XMMf64_MASKmskw_XMMf64_XMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_range_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="4"/>
+ <description>Calculate the max, min, absolute max, or absolute min (depending on control in "imm8") for the lower double-precision (64-bit) floating-point element in "a" and "b", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst".
+ imm8[1:0] specifies the operation control: 00 = min, 01 = max, 10 = absolute max, 11 = absolute min.
+ imm8[3:2] specifies the sign control: 00 = sign from a, 01 = sign from compare result, 10 = clear sign bit, 11 = set sign bit.</description>
+ <operation>
+DEFINE RANGE(src1[63:0], src2[63:0], opCtl[1:0], signSelCtl[1:0]) {
+ CASE opCtl[1:0] OF
+ 0: tmp[63:0] := (src1[63:0] &lt;= src2[63:0]) ? src1[63:0] : src2[63:0]
+ 1: tmp[63:0] := (src1[63:0] &lt;= src2[63:0]) ? src2[63:0] : src1[63:0]
+ 2: tmp[63:0] := (ABS(src1[63:0]) &lt;= ABS(src2[63:0])) ? src1[63:0] : src2[63:0]
+ 3: tmp[63:0] := (ABS(src1[63:0]) &lt;= ABS(src2[63:0])) ? src2[63:0] : src1[63:0]
+ ESAC
+
+ CASE signSelCtl[1:0] OF
+ 0: dst[63:0] := (src1[63] &lt;&lt; 63) OR (tmp[62:0])
+ 1: dst[63:0] := tmp[63:0]
+ 2: dst[63:0] := (0 &lt;&lt; 63) OR (tmp[62:0])
+ 3: dst[63:0] := (1 &lt;&lt; 63) OR (tmp[62:0])
+ ESAC
+
+ RETURN dst
+}
+IF k[0]
+ dst[63:0] := RANGE(a[63:0], b[63:0], imm8[1:0], imm8[3:2])
+ELSE
+ dst[63:0] := 0
+FI
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VRANGESD" form="xmm {z}, xmm, xmm, imm8" xed="VRANGESD_XMMf64_MASKmskw_XMMf64_XMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_range_round_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="4"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Calculate the max, min, absolute max, or absolute min (depending on control in "imm8") for the lower double-precision (64-bit) floating-point element in "a" and "b", store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst".
+ imm8[1:0] specifies the operation control: 00 = min, 01 = max, 10 = absolute max, 11 = absolute min.
+ imm8[3:2] specifies the sign control: 00 = sign from a, 01 = sign from compare result, 10 = clear sign bit, 11 = set sign bit. [sae_note]</description>
+ <operation>
+DEFINE RANGE(src1[63:0], src2[63:0], opCtl[1:0], signSelCtl[1:0]) {
+ CASE opCtl[1:0] OF
+ 0: tmp[63:0] := (src1[63:0] &lt;= src2[63:0]) ? src1[63:0] : src2[63:0]
+ 1: tmp[63:0] := (src1[63:0] &lt;= src2[63:0]) ? src2[63:0] : src1[63:0]
+ 2: tmp[63:0] := (ABS(src1[63:0]) &lt;= ABS(src2[63:0])) ? src1[63:0] : src2[63:0]
+ 3: tmp[63:0] := (ABS(src1[63:0]) &lt;= ABS(src2[63:0])) ? src2[63:0] : src1[63:0]
+ ESAC
+
+ CASE signSelCtl[1:0] OF
+ 0: dst[63:0] := (src1[63] &lt;&lt; 63) OR (tmp[62:0])
+ 1: dst[63:0] := tmp[63:0]
+ 2: dst[63:0] := (0 &lt;&lt; 63) OR (tmp[62:0])
+ 3: dst[63:0] := (1 &lt;&lt; 63) OR (tmp[62:0])
+ ESAC
+
+ RETURN dst
+}
+dst[63:0] := RANGE(a[63:0], b[63:0], imm8[1:0], imm8[3:2])
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VRANGESD" form="xmm, xmm, xmm {sae}, imm8" xed="VRANGESD_XMMf64_MASKmskw_XMMf64_XMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_range_round_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="4"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Calculate the max, min, absolute max, or absolute min (depending on control in "imm8") for the lower single-precision (32-bit) floating-point element in "a" and "b", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst".
+ imm8[1:0] specifies the operation control: 00 = min, 01 = max, 10 = absolute max, 11 = absolute min.
+ imm8[3:2] specifies the sign control: 00 = sign from a, 01 = sign from compare result, 10 = clear sign bit, 11 = set sign bit. [sae_note]</description>
+ <operation>
+DEFINE RANGE(src1[31:0], src2[31:0], opCtl[1:0], signSelCtl[1:0]) {
+ CASE opCtl[1:0] OF
+ 0: tmp[31:0] := (src1[31:0] &lt;= src2[31:0]) ? src1[31:0] : src2[31:0]
+ 1: tmp[31:0] := (src1[31:0] &lt;= src2[31:0]) ? src2[31:0] : src1[31:0]
+ 2: tmp[31:0] := (ABS(src1[31:0]) &lt;= ABS(src2[31:0])) ? src1[31:0] : src2[31:0]
+ 3: tmp[31:0] := (ABS(src1[31:0]) &lt;= ABS(src2[31:0])) ? src2[31:0] : src1[31:0]
+ ESAC
+
+ CASE signSelCtl[1:0] OF
+ 0: dst[31:0] := (src1[31] &lt;&lt; 31) OR (tmp[30:0])
+ 1: dst[31:0] := tmp[31:0]
+ 2: dst[31:0] := (0 &lt;&lt; 31) OR (tmp[30:0])
+ 3: dst[31:0] := (1 &lt;&lt; 31) OR (tmp[30:0])
+ ESAC
+
+ RETURN dst
+}
+IF k[0]
+ dst[31:0] := RANGE(a[31:0], b[31:0], imm8[1:0], imm8[3:2])
+ELSE
+ dst[31:0] := src[31:0]
+FI
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VRANGESS" form="xmm {k}, xmm, xmm {sae}, imm8" xed="VRANGESS_XMMf32_MASKmskw_XMMf32_XMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_range_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="4"/>
+ <description>Calculate the max, min, absolute max, or absolute min (depending on control in "imm8") for the lower single-precision (32-bit) floating-point element in "a" and "b", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst".
+ imm8[1:0] specifies the operation control: 00 = min, 01 = max, 10 = absolute max, 11 = absolute min.
+ imm8[3:2] specifies the sign control: 00 = sign from a, 01 = sign from compare result, 10 = clear sign bit, 11 = set sign bit.</description>
+ <operation>
+DEFINE RANGE(src1[31:0], src2[31:0], opCtl[1:0], signSelCtl[1:0]) {
+ CASE opCtl[1:0] OF
+ 0: tmp[31:0] := (src1[31:0] &lt;= src2[31:0]) ? src1[31:0] : src2[31:0]
+ 1: tmp[31:0] := (src1[31:0] &lt;= src2[31:0]) ? src2[31:0] : src1[31:0]
+ 2: tmp[31:0] := (ABS(src1[31:0]) &lt;= ABS(src2[31:0])) ? src1[31:0] : src2[31:0]
+ 3: tmp[31:0] := (ABS(src1[31:0]) &lt;= ABS(src2[31:0])) ? src2[31:0] : src1[31:0]
+ ESAC
+
+ CASE signSelCtl[1:0] OF
+ 0: dst[31:0] := (src1[31] &lt;&lt; 31) OR (tmp[30:0])
+ 1: dst[31:0] := tmp[31:0]
+ 2: dst[31:0] := (0 &lt;&lt; 31) OR (tmp[30:0])
+ 3: dst[31:0] := (1 &lt;&lt; 31) OR (tmp[30:0])
+ ESAC
+
+ RETURN dst
+}
+IF k[0]
+ dst[31:0] := RANGE(a[31:0], b[31:0], imm8[1:0], imm8[3:2])
+ELSE
+ dst[31:0] := src[31:0]
+FI
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VRANGESS" form="xmm {k}, xmm, xmm, imm8" xed="VRANGESS_XMMf32_MASKmskw_XMMf32_XMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_range_round_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="4"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Calculate the max, min, absolute max, or absolute min (depending on control in "imm8") for the lower single-precision (32-bit) floating-point element in "a" and "b", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst".
+ imm8[1:0] specifies the operation control: 00 = min, 01 = max, 10 = absolute max, 11 = absolute min.
+ imm8[3:2] specifies the sign control: 00 = sign from a, 01 = sign from compare result, 10 = clear sign bit, 11 = set sign bit. [sae_note]</description>
+ <operation>
+DEFINE RANGE(src1[31:0], src2[31:0], opCtl[1:0], signSelCtl[1:0]) {
+ CASE opCtl[1:0] OF
+ 0: tmp[31:0] := (src1[31:0] &lt;= src2[31:0]) ? src1[31:0] : src2[31:0]
+ 1: tmp[31:0] := (src1[31:0] &lt;= src2[31:0]) ? src2[31:0] : src1[31:0]
+ 2: tmp[31:0] := (ABS(src1[31:0]) &lt;= ABS(src2[31:0])) ? src1[31:0] : src2[31:0]
+ 3: tmp[31:0] := (ABS(src1[31:0]) &lt;= ABS(src2[31:0])) ? src2[31:0] : src1[31:0]
+ ESAC
+
+ CASE signSelCtl[1:0] OF
+ 0: dst[31:0] := (src1[31] &lt;&lt; 31) OR (tmp[30:0])
+ 1: dst[31:0] := tmp[31:0]
+ 2: dst[31:0] := (0 &lt;&lt; 31) OR (tmp[30:0])
+ 3: dst[31:0] := (1 &lt;&lt; 31) OR (tmp[30:0])
+ ESAC
+
+ RETURN dst
+}
+IF k[0]
+ dst[31:0] := RANGE(a[31:0], b[31:0], imm8[1:0], imm8[3:2])
+ELSE
+ dst[31:0] := 0
+FI
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VRANGESS" form="xmm {z}, xmm, xmm {sae}, imm8" xed="VRANGESS_XMMf32_MASKmskw_XMMf32_XMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_range_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="4"/>
+ <description>Calculate the max, min, absolute max, or absolute min (depending on control in "imm8") for the lower single-precision (32-bit) floating-point element in "a" and "b", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst".
+ imm8[1:0] specifies the operation control: 00 = min, 01 = max, 10 = absolute max, 11 = absolute min.
+ imm8[3:2] specifies the sign control: 00 = sign from a, 01 = sign from compare result, 10 = clear sign bit, 11 = set sign bit.</description>
+ <operation>
+DEFINE RANGE(src1[31:0], src2[31:0], opCtl[1:0], signSelCtl[1:0]) {
+ CASE opCtl[1:0] OF
+ 0: tmp[31:0] := (src1[31:0] &lt;= src2[31:0]) ? src1[31:0] : src2[31:0]
+ 1: tmp[31:0] := (src1[31:0] &lt;= src2[31:0]) ? src2[31:0] : src1[31:0]
+ 2: tmp[31:0] := (ABS(src1[31:0]) &lt;= ABS(src2[31:0])) ? src1[31:0] : src2[31:0]
+ 3: tmp[31:0] := (ABS(src1[31:0]) &lt;= ABS(src2[31:0])) ? src2[31:0] : src1[31:0]
+ ESAC
+
+ CASE signSelCtl[1:0] OF
+ 0: dst[31:0] := (src1[31] &lt;&lt; 31) OR (tmp[30:0])
+ 1: dst[31:0] := tmp[31:0]
+ 2: dst[31:0] := (0 &lt;&lt; 31) OR (tmp[30:0])
+ 3: dst[31:0] := (1 &lt;&lt; 31) OR (tmp[30:0])
+ ESAC
+
+ RETURN dst
+}
+IF k[0]
+ dst[31:0] := RANGE(a[31:0], b[31:0], imm8[1:0], imm8[3:2])
+ELSE
+ dst[31:0] := 0
+FI
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VRANGESS" form="xmm {z}, xmm, xmm, imm8" xed="VRANGESS_XMMf32_MASKmskw_XMMf32_XMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_range_round_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="4"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Calculate the max, min, absolute max, or absolute min (depending on control in "imm8") for the lower single-precision (32-bit) floating-point element in "a" and "b", store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst".
+ imm8[1:0] specifies the operation control: 00 = min, 01 = max, 10 = absolute max, 11 = absolute min.
+ imm8[3:2] specifies the sign control: 00 = sign from a, 01 = sign from compare result, 10 = clear sign bit, 11 = set sign bit. [sae_note]</description>
+ <operation>
+DEFINE RANGE(src1[31:0], src2[31:0], opCtl[1:0], signSelCtl[1:0]) {
+ CASE opCtl[1:0] OF
+ 0: tmp[31:0] := (src1[31:0] &lt;= src2[31:0]) ? src1[31:0] : src2[31:0]
+ 1: tmp[31:0] := (src1[31:0] &lt;= src2[31:0]) ? src2[31:0] : src1[31:0]
+ 2: tmp[31:0] := (ABS(src1[31:0]) &lt;= ABS(src2[31:0])) ? src1[31:0] : src2[31:0]
+ 3: tmp[31:0] := (ABS(src1[31:0]) &lt;= ABS(src2[31:0])) ? src2[31:0] : src1[31:0]
+ ESAC
+
+ CASE signSelCtl[1:0] OF
+ 0: dst[31:0] := (src1[31] &lt;&lt; 31) OR (tmp[30:0])
+ 1: dst[31:0] := tmp[31:0]
+ 2: dst[31:0] := (0 &lt;&lt; 31) OR (tmp[30:0])
+ 3: dst[31:0] := (1 &lt;&lt; 31) OR (tmp[30:0])
+ ESAC
+
+ RETURN dst
+}
+dst[31:0] := RANGE(a[31:0], b[31:0], imm8[1:0], imm8[3:2])
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VRANGESS" form="xmm, xmm, xmm {sae}, imm8" xed="VRANGESS_XMMf32_MASKmskw_XMMf32_XMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_reduce_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immtype="_MM_REDUCE"/>
+ <description>Extract the reduced argument of packed double-precision (64-bit) floating-point elements in "a" by the number of bits specified by "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). [round_imm_note]</description>
+ <operation>
+DEFINE ReduceArgumentPD(src1[63:0], imm8[7:0]) {
+ m[63:0] := FP64(imm8[7:4]) // number of fraction bits after the binary point to be preserved
+ tmp[63:0] := POW(2.0, -m) * ROUND(POW(2.0, m) * src1[63:0], imm8[3:0])
+ tmp[63:0] := src1[63:0] - tmp[63:0]
+ IF IsInf(tmp[63:0])
+ tmp[63:0] := FP64(0.0)
+ FI
+ RETURN tmp[63:0]
+}
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := ReduceArgumentPD(a[i+63:i], imm8[7:0])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VREDUCEPD" form="ymm {k}, ymm, imm8" xed="VREDUCEPD_YMMf64_MASKmskw_YMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_reduce_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immtype="_MM_REDUCE"/>
+ <description>Extract the reduced argument of packed double-precision (64-bit) floating-point elements in "a" by the number of bits specified by "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). [round_imm_note]</description>
+ <operation>
+DEFINE ReduceArgumentPD(src1[63:0], imm8[7:0]) {
+ m[63:0] := FP64(imm8[7:4]) // number of fraction bits after the binary point to be preserved
+ tmp[63:0] := POW(2.0, -m) * ROUND(POW(2.0, m) * src1[63:0], imm8[3:0])
+ tmp[63:0] := src1[63:0] - tmp[63:0]
+ IF IsInf(tmp[63:0])
+ tmp[63:0] := FP64(0.0)
+ FI
+ RETURN tmp[63:0]
+}
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := ReduceArgumentPD(a[i+63:i], imm8[7:0])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VREDUCEPD" form="ymm {z}, ymm, imm8" xed="VREDUCEPD_YMMf64_MASKmskw_YMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_reduce_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immtype="_MM_REDUCE"/>
+ <description>Extract the reduced argument of packed double-precision (64-bit) floating-point elements in "a" by the number of bits specified by "imm8", and store the results in "dst". [round_imm_note]</description>
+ <operation>
+DEFINE ReduceArgumentPD(src1[63:0], imm8[7:0]) {
+ m[63:0] := FP64(imm8[7:4]) // number of fraction bits after the binary point to be preserved
+ tmp[63:0] := POW(2.0, -m) * ROUND(POW(2.0, m) * src1[63:0], imm8[3:0])
+ tmp[63:0] := src1[63:0] - tmp[63:0]
+ IF IsInf(tmp[63:0])
+ tmp[63:0] := FP64(0.0)
+ FI
+ RETURN tmp[63:0]
+}
+FOR j := 0 to 3
+ i := j*64
+ dst[i+63:i] := ReduceArgumentPD(a[i+63:i], imm8[7:0])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VREDUCEPD" form="ymm, ymm, imm8" xed="VREDUCEPD_YMMf64_MASKmskw_YMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_reduce_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immtype="_MM_REDUCE"/>
+ <description>Extract the reduced argument of packed double-precision (64-bit) floating-point elements in "a" by the number of bits specified by "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). [round_imm_note]</description>
+ <operation>
+DEFINE ReduceArgumentPD(src1[63:0], imm8[7:0]) {
+ m[63:0] := FP64(imm8[7:4]) // number of fraction bits after the binary point to be preserved
+ tmp[63:0] := POW(2.0, -m) * ROUND(POW(2.0, m) * src1[63:0], imm8[3:0])
+ tmp[63:0] := src1[63:0] - tmp[63:0]
+ IF IsInf(tmp[63:0])
+ tmp[63:0] := FP64(0.0)
+ FI
+ RETURN tmp[63:0]
+}
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := ReduceArgumentPD(a[i+63:i], imm8[7:0])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VREDUCEPD" form="zmm {k}, zmm, imm8" xed="VREDUCEPD_ZMMf64_MASKmskw_ZMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_reduce_round_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immtype="_MM_REDUCE"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Extract the reduced argument of packed double-precision (64-bit) floating-point elements in "a" by the number of bits specified by "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). [round_imm_note][sae_note]</description>
+ <operation>
+DEFINE ReduceArgumentPD(src1[63:0], imm8[7:0]) {
+ m[63:0] := FP64(imm8[7:4]) // number of fraction bits after the binary point to be preserved
+ tmp[63:0] := POW(2.0, -m) * ROUND(POW(2.0, m) * src1[63:0], imm8[3:0])
+ tmp[63:0] := src1[63:0] - tmp[63:0]
+ IF IsInf(tmp[63:0])
+ tmp[63:0] := FP64(0.0)
+ FI
+ RETURN tmp[63:0]
+}
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := ReduceArgumentPD(a[i+63:i], imm8[7:0])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VREDUCEPD" form="zmm {k}, zmm {sae}, imm8" xed="VREDUCEPD_ZMMf64_MASKmskw_ZMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_reduce_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immtype="_MM_REDUCE"/>
+ <description>Extract the reduced argument of packed double-precision (64-bit) floating-point elements in "a" by the number of bits specified by "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). [round_imm_note]</description>
+ <operation>
+DEFINE ReduceArgumentPD(src1[63:0], imm8[7:0]) {
+ m[63:0] := FP64(imm8[7:4]) // number of fraction bits after the binary point to be preserved
+ tmp[63:0] := POW(2.0, -m) * ROUND(POW(2.0, m) * src1[63:0], imm8[3:0])
+ tmp[63:0] := src1[63:0] - tmp[63:0]
+ IF IsInf(tmp[63:0])
+ tmp[63:0] := FP64(0.0)
+ FI
+ RETURN tmp[63:0]
+}
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := ReduceArgumentPD(a[i+63:i], imm8[7:0])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VREDUCEPD" form="zmm {z}, zmm, imm8" xed="VREDUCEPD_ZMMf64_MASKmskw_ZMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_reduce_round_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immtype="_MM_REDUCE"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Extract the reduced argument of packed double-precision (64-bit) floating-point elements in "a" by the number of bits specified by "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). [round_imm_note][sae_note]</description>
+ <operation>
+DEFINE ReduceArgumentPD(src1[63:0], imm8[7:0]) {
+ m[63:0] := FP64(imm8[7:4]) // number of fraction bits after the binary point to be preserved
+ tmp[63:0] := POW(2.0, -m) * ROUND(POW(2.0, m) * src1[63:0], imm8[3:0])
+ tmp[63:0] := src1[63:0] - tmp[63:0]
+ IF IsInf(tmp[63:0])
+ tmp[63:0] := FP64(0.0)
+ FI
+ RETURN tmp[63:0]
+}
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := ReduceArgumentPD(a[i+63:i], imm8[7:0])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VREDUCEPD" form="zmm {z}, zmm {sae}, imm8" xed="VREDUCEPD_ZMMf64_MASKmskw_ZMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_reduce_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immtype="_MM_REDUCE"/>
+ <description>Extract the reduced argument of packed double-precision (64-bit) floating-point elements in "a" by the number of bits specified by "imm8", and store the results in "dst". [round_imm_note]</description>
+ <operation>
+DEFINE ReduceArgumentPD(src1[63:0], imm8[7:0]) {
+ m[63:0] := FP64(imm8[7:4]) // number of fraction bits after the binary point to be preserved
+ tmp[63:0] := POW(2.0, -m) * ROUND(POW(2.0, m) * src1[63:0], imm8[3:0])
+ tmp[63:0] := src1[63:0] - tmp[63:0]
+ IF IsInf(tmp[63:0])
+ tmp[63:0] := FP64(0.0)
+ FI
+ RETURN tmp[63:0]
+}
+FOR j := 0 to 7
+ i := j*64
+ dst[i+63:i] := ReduceArgumentPD(a[i+63:i], imm8[7:0])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VREDUCEPD" form="zmm, zmm, imm8" xed="VREDUCEPD_ZMMf64_MASKmskw_ZMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_reduce_round_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immtype="_MM_REDUCE"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Extract the reduced argument of packed double-precision (64-bit) floating-point elements in "a" by the number of bits specified by "imm8", and store the results in "dst". [round_imm_note][sae_note]</description>
+ <operation>
+DEFINE ReduceArgumentPD(src1[63:0], imm8[7:0]) {
+ m[63:0] := FP64(imm8[7:4]) // number of fraction bits after the binary point to be preserved
+ tmp[63:0] := POW(2.0, -m) * ROUND(POW(2.0, m) * src1[63:0], imm8[3:0])
+ tmp[63:0] := src1[63:0] - tmp[63:0]
+ IF IsInf(tmp[63:0])
+ tmp[63:0] := FP64(0.0)
+ FI
+ RETURN tmp[63:0]
+}
+FOR j := 0 to 7
+ i := j*64
+ dst[i+63:i] := ReduceArgumentPD(a[i+63:i], imm8[7:0])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VREDUCEPD" form="zmm, zmm {sae}, imm8" xed="VREDUCEPD_ZMMf64_MASKmskw_ZMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_reduce_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immtype="_MM_REDUCE"/>
+ <description>Extract the reduced argument of packed double-precision (64-bit) floating-point elements in "a" by the number of bits specified by "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). [round_imm_note]</description>
+ <operation>
+DEFINE ReduceArgumentPD(src1[63:0], imm8[7:0]) {
+ m[63:0] := FP64(imm8[7:4]) // number of fraction bits after the binary point to be preserved
+ tmp[63:0] := POW(2.0, -m) * ROUND(POW(2.0, m) * src1[63:0], imm8[3:0])
+ tmp[63:0] := src1[63:0] - tmp[63:0]
+ IF IsInf(tmp[63:0])
+ tmp[63:0] := FP64(0.0)
+ FI
+ RETURN tmp[63:0]
+}
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := ReduceArgumentPD(a[i+63:i], imm8[7:0])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VREDUCEPD" form="xmm {k}, xmm, imm8" xed="VREDUCEPD_XMMf64_MASKmskw_XMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_reduce_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immtype="_MM_REDUCE"/>
+ <description>Extract the reduced argument of packed double-precision (64-bit) floating-point elements in "a" by the number of bits specified by "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). [round_imm_note]</description>
+ <operation>
+DEFINE ReduceArgumentPD(src1[63:0], imm8[7:0]) {
+ m[63:0] := FP64(imm8[7:4]) // number of fraction bits after the binary point to be preserved
+ tmp[63:0] := POW(2.0, -m) * ROUND(POW(2.0, m) * src1[63:0], imm8[3:0])
+ tmp[63:0] := src1[63:0] - tmp[63:0]
+ IF IsInf(tmp[63:0])
+ tmp[63:0] := FP64(0.0)
+ FI
+ RETURN tmp[63:0]
+}
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := ReduceArgumentPD(a[i+63:i], imm8[7:0])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VREDUCEPD" form="xmm {z}, xmm, imm8" xed="VREDUCEPD_XMMf64_MASKmskw_XMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_reduce_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immtype="_MM_REDUCE"/>
+ <description>Extract the reduced argument of packed double-precision (64-bit) floating-point elements in "a" by the number of bits specified by "imm8", and store the results in "dst". [round_imm_note]</description>
+ <operation>
+DEFINE ReduceArgumentPD(src1[63:0], imm8[7:0]) {
+ m[63:0] := FP64(imm8[7:4]) // number of fraction bits after the binary point to be preserved
+ tmp[63:0] := POW(2.0, -m) * ROUND(POW(2.0, m) * src1[63:0], imm8[3:0])
+ tmp[63:0] := src1[63:0] - tmp[63:0]
+ IF IsInf(tmp[63:0])
+ tmp[63:0] := FP64(0.0)
+ FI
+ RETURN tmp[63:0]
+}
+FOR j := 0 to 1
+ i := j*64
+ dst[i+63:i] := ReduceArgumentPD(a[i+63:i], imm8[7:0])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VREDUCEPD" form="xmm, xmm, imm8" xed="VREDUCEPD_XMMf64_MASKmskw_XMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_reduce_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immtype="_MM_REDUCE"/>
+ <description>Extract the reduced argument of packed single-precision (32-bit) floating-point elements in "a" by the number of bits specified by "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). [round_imm_note]</description>
+ <operation>
+DEFINE ReduceArgumentPS(src1[31:0], imm8[7:0]) {
+ m[31:0] := FP32(imm8[7:4]) // number of fraction bits after the binary point to be preserved
+ tmp[31:0] := POW(FP32(2.0), -m) * ROUND(POW(FP32(2.0), m) * src1[31:0], imm8[3:0])
+ tmp[31:0] := src1[31:0] - tmp[31:0]
+ IF IsInf(tmp[31:0])
+ tmp[31:0] := FP32(0.0)
+ FI
+ RETURN tmp[31:0]
+}
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := ReduceArgumentPS(a[i+31:i], imm8[7:0])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VREDUCEPS" form="ymm {k}, ymm, imm8" xed="VREDUCEPS_YMMf32_MASKmskw_YMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_reduce_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immtype="_MM_REDUCE"/>
+ <description>Extract the reduced argument of packed single-precision (32-bit) floating-point elements in "a" by the number of bits specified by "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). [round_imm_note]</description>
+ <operation>
+DEFINE ReduceArgumentPS(src1[31:0], imm8[7:0]) {
+ m[31:0] := FP32(imm8[7:4]) // number of fraction bits after the binary point to be preserved
+ tmp[31:0] := POW(FP32(2.0), -m) * ROUND(POW(FP32(2.0), m) * src1[31:0], imm8[3:0])
+ tmp[31:0] := src1[31:0] - tmp[31:0]
+ IF IsInf(tmp[31:0])
+ tmp[31:0] := FP32(0.0)
+ FI
+ RETURN tmp[31:0]
+}
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := ReduceArgumentPS(a[i+31:i], imm8[7:0])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VREDUCEPS" form="ymm {z}, ymm, imm8" xed="VREDUCEPS_YMMf32_MASKmskw_YMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_reduce_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immtype="_MM_REDUCE"/>
+ <description>Extract the reduced argument of packed single-precision (32-bit) floating-point elements in "a" by the number of bits specified by "imm8", and store the results in "dst". [round_imm_note]</description>
+ <operation>
+DEFINE ReduceArgumentPS(src1[31:0], imm8[7:0]) {
+ m[31:0] := FP32(imm8[7:4]) // number of fraction bits after the binary point to be preserved
+ tmp[31:0] := POW(FP32(2.0), -m) * ROUND(POW(FP32(2.0), m) * src1[31:0], imm8[3:0])
+ tmp[31:0] := src1[31:0] - tmp[31:0]
+	IF IsInf(tmp[31:0])
+		tmp[31:0] := FP32(0.0)
+	FI
+	RETURN tmp[31:0]
+}
+FOR j := 0 to 7
+ i := j*32
+ dst[i+31:i] := ReduceArgumentPS(a[i+31:i], imm8[7:0])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VREDUCEPS" form="ymm, ymm, imm8" xed="VREDUCEPS_YMMf32_MASKmskw_YMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_reduce_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immtype="_MM_REDUCE"/>
+ <description>Extract the reduced argument of packed single-precision (32-bit) floating-point elements in "a" by the number of bits specified by "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). [round_imm_note]</description>
+ <operation>
+DEFINE ReduceArgumentPS(src1[31:0], imm8[7:0]) {
+ m[31:0] := FP32(imm8[7:4]) // number of fraction bits after the binary point to be preserved
+ tmp[31:0] := POW(FP32(2.0), -m) * ROUND(POW(FP32(2.0), m) * src1[31:0], imm8[3:0])
+ tmp[31:0] := src1[31:0] - tmp[31:0]
+ IF IsInf(tmp[31:0])
+ tmp[31:0] := FP32(0.0)
+ FI
+ RETURN tmp[31:0]
+}
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := ReduceArgumentPS(a[i+31:i], imm8[7:0])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VREDUCEPS" form="zmm {k}, zmm, imm8" xed="VREDUCEPS_ZMMf32_MASKmskw_ZMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_reduce_round_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immtype="_MM_REDUCE"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Extract the reduced argument of packed single-precision (32-bit) floating-point elements in "a" by the number of bits specified by "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). [round_imm_note][sae_note]</description>
+ <operation>
+DEFINE ReduceArgumentPS(src1[31:0], imm8[7:0]) {
+ m[31:0] := FP32(imm8[7:4]) // number of fraction bits after the binary point to be preserved
+ tmp[31:0] := POW(FP32(2.0), -m) * ROUND(POW(FP32(2.0), m) * src1[31:0], imm8[3:0])
+ tmp[31:0] := src1[31:0] - tmp[31:0]
+ IF IsInf(tmp[31:0])
+ tmp[31:0] := FP32(0.0)
+ FI
+ RETURN tmp[31:0]
+}
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := ReduceArgumentPS(a[i+31:i], imm8[7:0])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VREDUCEPS" form="zmm {k}, zmm {sae}, imm8" xed="VREDUCEPS_ZMMf32_MASKmskw_ZMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_reduce_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immtype="_MM_REDUCE"/>
+ <description>Extract the reduced argument of packed single-precision (32-bit) floating-point elements in "a" by the number of bits specified by "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). [round_imm_note]</description>
+ <operation>
+DEFINE ReduceArgumentPS(src1[31:0], imm8[7:0]) {
+ m[31:0] := FP32(imm8[7:4]) // number of fraction bits after the binary point to be preserved
+ tmp[31:0] := POW(FP32(2.0), -m) * ROUND(POW(FP32(2.0), m) * src1[31:0], imm8[3:0])
+ tmp[31:0] := src1[31:0] - tmp[31:0]
+ IF IsInf(tmp[31:0])
+ tmp[31:0] := FP32(0.0)
+ FI
+ RETURN tmp[31:0]
+}
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := ReduceArgumentPS(a[i+31:i], imm8[7:0])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VREDUCEPS" form="zmm {z}, zmm, imm8" xed="VREDUCEPS_ZMMf32_MASKmskw_ZMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_reduce_round_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immtype="_MM_REDUCE"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Extract the reduced argument of packed single-precision (32-bit) floating-point elements in "a" by the number of bits specified by "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). [round_imm_note][sae_note]</description>
+ <operation>
+DEFINE ReduceArgumentPS(src1[31:0], imm8[7:0]) {
+ m[31:0] := FP32(imm8[7:4]) // number of fraction bits after the binary point to be preserved
+ tmp[31:0] := POW(FP32(2.0), -m) * ROUND(POW(FP32(2.0), m) * src1[31:0], imm8[3:0])
+ tmp[31:0] := src1[31:0] - tmp[31:0]
+ IF IsInf(tmp[31:0])
+ tmp[31:0] := FP32(0.0)
+ FI
+ RETURN tmp[31:0]
+}
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := ReduceArgumentPS(a[i+31:i], imm8[7:0])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VREDUCEPS" form="zmm {z}, zmm {sae}, imm8" xed="VREDUCEPS_ZMMf32_MASKmskw_ZMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_reduce_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immtype="_MM_REDUCE"/>
+ <description>Extract the reduced argument of packed single-precision (32-bit) floating-point elements in "a" by the number of bits specified by "imm8", and store the results in "dst". [round_imm_note]</description>
+ <operation>
+DEFINE ReduceArgumentPS(src1[31:0], imm8[7:0]) {
+ m[31:0] := FP32(imm8[7:4]) // number of fraction bits after the binary point to be preserved
+ tmp[31:0] := POW(FP32(2.0), -m) * ROUND(POW(FP32(2.0), m) * src1[31:0], imm8[3:0])
+ tmp[31:0] := src1[31:0] - tmp[31:0]
+ IF IsInf(tmp[31:0])
+ tmp[31:0] := FP32(0.0)
+ FI
+ RETURN tmp[31:0]
+}
+FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := ReduceArgumentPS(a[i+31:i], imm8[7:0])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VREDUCEPS" form="zmm, zmm, imm8" xed="VREDUCEPS_ZMMf32_MASKmskw_ZMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_reduce_round_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immtype="_MM_REDUCE"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Extract the reduced argument of packed single-precision (32-bit) floating-point elements in "a" by the number of bits specified by "imm8", and store the results in "dst". [round_imm_note][sae_note]</description>
+ <operation>
+DEFINE ReduceArgumentPS(src1[31:0], imm8[7:0]) {
+ m[31:0] := FP32(imm8[7:4]) // number of fraction bits after the binary point to be preserved
+ tmp[31:0] := POW(FP32(2.0), -m) * ROUND(POW(FP32(2.0), m) * src1[31:0], imm8[3:0])
+ tmp[31:0] := src1[31:0] - tmp[31:0]
+ IF IsInf(tmp[31:0])
+ tmp[31:0] := FP32(0.0)
+ FI
+ RETURN tmp[31:0]
+}
+FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := ReduceArgumentPS(a[i+31:i], imm8[7:0])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VREDUCEPS" form="zmm, zmm {sae}, imm8" xed="VREDUCEPS_ZMMf32_MASKmskw_ZMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_reduce_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immtype="_MM_REDUCE"/>
+ <description>Extract the reduced argument of packed single-precision (32-bit) floating-point elements in "a" by the number of bits specified by "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). [round_imm_note]</description>
+ <operation>
+DEFINE ReduceArgumentPS(src1[31:0], imm8[7:0]) {
+ m[31:0] := FP32(imm8[7:4]) // number of fraction bits after the binary point to be preserved
+ tmp[31:0] := POW(FP32(2.0), -m) * ROUND(POW(FP32(2.0), m) * src1[31:0], imm8[3:0])
+ tmp[31:0] := src1[31:0] - tmp[31:0]
+ IF IsInf(tmp[31:0])
+ tmp[31:0] := FP32(0.0)
+ FI
+ RETURN tmp[31:0]
+}
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := ReduceArgumentPS(a[i+31:i], imm8[7:0])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VREDUCEPS" form="xmm {k}, xmm, imm8" xed="VREDUCEPS_XMMf32_MASKmskw_XMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_reduce_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immtype="_MM_REDUCE"/>
+ <description>Extract the reduced argument of packed single-precision (32-bit) floating-point elements in "a" by the number of bits specified by "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). [round_imm_note]</description>
+ <operation>
+DEFINE ReduceArgumentPS(src1[31:0], imm8[7:0]) {
+ m[31:0] := FP32(imm8[7:4]) // number of fraction bits after the binary point to be preserved
+ tmp[31:0] := POW(FP32(2.0), -m) * ROUND(POW(FP32(2.0), m) * src1[31:0], imm8[3:0])
+ tmp[31:0] := src1[31:0] - tmp[31:0]
+ IF IsInf(tmp[31:0])
+ tmp[31:0] := FP32(0.0)
+ FI
+ RETURN tmp[31:0]
+}
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := ReduceArgumentPS(a[i+31:i], imm8[7:0])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VREDUCEPS" form="xmm {z}, xmm, imm8" xed="VREDUCEPS_XMMf32_MASKmskw_XMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_reduce_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immtype="_MM_REDUCE"/>
+ <description>Extract the reduced argument of packed single-precision (32-bit) floating-point elements in "a" by the number of bits specified by "imm8", and store the results in "dst". [round_imm_note]</description>
+ <operation>
+DEFINE ReduceArgumentPS(src1[31:0], imm8[7:0]) {
+ m[31:0] := FP32(imm8[7:4]) // number of fraction bits after the binary point to be preserved
+ tmp[31:0] := POW(FP32(2.0), -m) * ROUND(POW(FP32(2.0), m) * src1[31:0], imm8[3:0])
+ tmp[31:0] := src1[31:0] - tmp[31:0]
+ IF IsInf(tmp[31:0])
+ tmp[31:0] := FP32(0.0)
+ FI
+ RETURN tmp[31:0]
+}
+FOR j := 0 to 3
+ i := j*32
+ dst[i+31:i] := ReduceArgumentPS(a[i+31:i], imm8[7:0])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VREDUCEPS" form="xmm, xmm, imm8" xed="VREDUCEPS_XMMf32_MASKmskw_XMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_reduce_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immtype="_MM_REDUCE"/>
+ <description>Extract the reduced argument of the lower double-precision (64-bit) floating-point element in "b" by the number of bits specified by "imm8", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". [round_imm_note]</description>
+ <operation>
+DEFINE ReduceArgumentPD(src1[63:0], imm8[7:0]) {
+ m[63:0] := FP64(imm8[7:4]) // number of fraction bits after the binary point to be preserved
+ tmp[63:0] := POW(2.0, -m) * ROUND(POW(2.0, m) * src1[63:0], imm8[3:0])
+ tmp[63:0] := src1[63:0] - tmp[63:0]
+ IF IsInf(tmp[63:0])
+ tmp[63:0] := FP64(0.0)
+ FI
+ RETURN tmp[63:0]
+}
+IF k[0]
+ dst[63:0] := ReduceArgumentPD(b[63:0], imm8[7:0])
+ELSE
+ dst[63:0] := src[63:0]
+FI
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VREDUCESD" form="xmm {k}, xmm, xmm, imm8" xed="VREDUCESD_XMMf64_MASKmskw_XMMf64_XMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_reduce_round_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immtype="_MM_REDUCE"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Extract the reduced argument of the lower double-precision (64-bit) floating-point element in "b" by the number of bits specified by "imm8", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". [round_imm_note][sae_note]</description>
+ <operation>
+DEFINE ReduceArgumentPD(src1[63:0], imm8[7:0]) {
+ m[63:0] := FP64(imm8[7:4]) // number of fraction bits after the binary point to be preserved
+ tmp[63:0] := POW(2.0, -m) * ROUND(POW(2.0, m) * src1[63:0], imm8[3:0])
+ tmp[63:0] := src1[63:0] - tmp[63:0]
+ IF IsInf(tmp[63:0])
+ tmp[63:0] := FP64(0.0)
+ FI
+ RETURN tmp[63:0]
+}
+IF k[0]
+ dst[63:0] := ReduceArgumentPD(b[63:0], imm8[7:0])
+ELSE
+ dst[63:0] := src[63:0]
+FI
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VREDUCESD" form="xmm {k}, xmm, xmm {sae}, imm8" xed="VREDUCESD_XMMf64_MASKmskw_XMMf64_XMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_reduce_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immtype="_MM_REDUCE"/>
+ <description>Extract the reduced argument of the lower double-precision (64-bit) floating-point element in "b" by the number of bits specified by "imm8", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". [round_imm_note]</description>
+ <operation>
+DEFINE ReduceArgumentPD(src1[63:0], imm8[7:0]) {
+ m[63:0] := FP64(imm8[7:4]) // number of fraction bits after the binary point to be preserved
+ tmp[63:0] := POW(2.0, -m) * ROUND(POW(2.0, m) * src1[63:0], imm8[3:0])
+ tmp[63:0] := src1[63:0] - tmp[63:0]
+ IF IsInf(tmp[63:0])
+ tmp[63:0] := FP64(0.0)
+ FI
+ RETURN tmp[63:0]
+}
+IF k[0]
+ dst[63:0] := ReduceArgumentPD(b[63:0], imm8[7:0])
+ELSE
+ dst[63:0] := 0
+FI
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VREDUCESD" form="xmm {z}, xmm, xmm, imm8" xed="VREDUCESD_XMMf64_MASKmskw_XMMf64_XMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_reduce_round_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immtype="_MM_REDUCE"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Extract the reduced argument of the lower double-precision (64-bit) floating-point element in "b" by the number of bits specified by "imm8", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". [round_imm_note][sae_note]</description>
+ <operation>
+DEFINE ReduceArgumentPD(src1[63:0], imm8[7:0]) {
+ m[63:0] := FP64(imm8[7:4]) // number of fraction bits after the binary point to be preserved
+ tmp[63:0] := POW(2.0, -m) * ROUND(POW(2.0, m) * src1[63:0], imm8[3:0])
+ tmp[63:0] := src1[63:0] - tmp[63:0]
+ IF IsInf(tmp[63:0])
+ tmp[63:0] := FP64(0.0)
+ FI
+ RETURN tmp[63:0]
+}
+IF k[0]
+ dst[63:0] := ReduceArgumentPD(b[63:0], imm8[7:0])
+ELSE
+ dst[63:0] := 0
+FI
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VREDUCESD" form="xmm {z}, xmm, xmm {sae}, imm8" xed="VREDUCESD_XMMf64_MASKmskw_XMMf64_XMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_reduce_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immtype="_MM_REDUCE"/>
+ <description>Extract the reduced argument of the lower double-precision (64-bit) floating-point element in "b" by the number of bits specified by "imm8", store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst". [round_imm_note]</description>
+ <operation>
+DEFINE ReduceArgumentPD(src1[63:0], imm8[7:0]) {
+ m[63:0] := FP64(imm8[7:4]) // number of fraction bits after the binary point to be preserved
+ tmp[63:0] := POW(2.0, -m) * ROUND(POW(2.0, m) * src1[63:0], imm8[3:0])
+ tmp[63:0] := src1[63:0] - tmp[63:0]
+ IF IsInf(tmp[63:0])
+ tmp[63:0] := FP64(0.0)
+ FI
+ RETURN tmp[63:0]
+}
+dst[63:0] := ReduceArgumentPD(b[63:0], imm8[7:0])
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VREDUCESD" form="xmm, xmm, xmm, imm8" xed="VREDUCESD_XMMf64_MASKmskw_XMMf64_XMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_reduce_round_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immtype="_MM_REDUCE"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Extract the reduced argument of the lower double-precision (64-bit) floating-point element in "b" by the number of bits specified by "imm8", store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst". [round_imm_note][sae_note]</description>
+ <operation>
+DEFINE ReduceArgumentPD(src1[63:0], imm8[7:0]) {
+ m[63:0] := FP64(imm8[7:4]) // number of fraction bits after the binary point to be preserved
+ tmp[63:0] := POW(2.0, -m) * ROUND(POW(2.0, m) * src1[63:0], imm8[3:0])
+ tmp[63:0] := src1[63:0] - tmp[63:0]
+ IF IsInf(tmp[63:0])
+ tmp[63:0] := FP64(0.0)
+ FI
+ RETURN tmp[63:0]
+}
+dst[63:0] := ReduceArgumentPD(b[63:0], imm8[7:0])
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VREDUCESD" form="xmm, xmm, xmm {sae}, imm8" xed="VREDUCESD_XMMf64_MASKmskw_XMMf64_XMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_reduce_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immtype="_MM_REDUCE"/>
+ <description>Extract the reduced argument of the lower single-precision (32-bit) floating-point element in "b" by the number of bits specified by "imm8", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". [round_imm_note]</description>
+ <operation>
+DEFINE ReduceArgumentPS(src1[31:0], imm8[7:0]) {
+ m[31:0] := FP32(imm8[7:4]) // number of fraction bits after the binary point to be preserved
+ tmp[31:0] := POW(FP32(2.0), -m) * ROUND(POW(FP32(2.0), m) * src1[31:0], imm8[3:0])
+ tmp[31:0] := src1[31:0] - tmp[31:0]
+ IF IsInf(tmp[31:0])
+ tmp[31:0] := FP32(0.0)
+ FI
+ RETURN tmp[31:0]
+}
+IF k[0]
+ dst[31:0] := ReduceArgumentPS(b[31:0], imm8[7:0])
+ELSE
+ dst[31:0] := src[31:0]
+FI
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VREDUCESS" form="xmm {k}, xmm, xmm, imm8" xed="VREDUCESS_XMMf32_MASKmskw_XMMf32_XMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_reduce_round_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immtype="_MM_REDUCE"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Extract the reduced argument of the lower single-precision (32-bit) floating-point element in "b" by the number of bits specified by "imm8", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". [round_imm_note][sae_note]</description>
+ <operation>
+DEFINE ReduceArgumentPS(src1[31:0], imm8[7:0]) {
+ m[31:0] := FP32(imm8[7:4]) // number of fraction bits after the binary point to be preserved
+ tmp[31:0] := POW(FP32(2.0), -m) * ROUND(POW(FP32(2.0), m) * src1[31:0], imm8[3:0])
+ tmp[31:0] := src1[31:0] - tmp[31:0]
+ IF IsInf(tmp[31:0])
+ tmp[31:0] := FP32(0.0)
+ FI
+ RETURN tmp[31:0]
+}
+IF k[0]
+ dst[31:0] := ReduceArgumentPS(b[31:0], imm8[7:0])
+ELSE
+ dst[31:0] := src[31:0]
+FI
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VREDUCESS" form="xmm {k}, xmm, xmm {sae}, imm8" xed="VREDUCESS_XMMf32_MASKmskw_XMMf32_XMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_reduce_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immtype="_MM_REDUCE"/>
+ <description>Extract the reduced argument of the lower single-precision (32-bit) floating-point element in "b" by the number of bits specified by "imm8", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". [round_imm_note]</description>
+ <operation>
+DEFINE ReduceArgumentPS(src1[31:0], imm8[7:0]) {
+ m[31:0] := FP32(imm8[7:4]) // number of fraction bits after the binary point to be preserved
+ tmp[31:0] := POW(FP32(2.0), -m) * ROUND(POW(FP32(2.0), m) * src1[31:0], imm8[3:0])
+ tmp[31:0] := src1[31:0] - tmp[31:0]
+ IF IsInf(tmp[31:0])
+ tmp[31:0] := FP32(0.0)
+ FI
+ RETURN tmp[31:0]
+}
+IF k[0]
+ dst[31:0] := ReduceArgumentPS(b[31:0], imm8[7:0])
+ELSE
+ dst[31:0] := 0
+FI
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VREDUCESS" form="xmm {z}, xmm, xmm, imm8" xed="VREDUCESS_XMMf32_MASKmskw_XMMf32_XMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_reduce_round_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immtype="_MM_REDUCE"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Extract the reduced argument of the lower single-precision (32-bit) floating-point element in "b" by the number of bits specified by "imm8", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". [round_imm_note][sae_note]</description>
+ <operation>
+DEFINE ReduceArgumentPS(src1[31:0], imm8[7:0]) {
+ m[31:0] := FP32(imm8[7:4]) // number of fraction bits after the binary point to be preserved
+ tmp[31:0] := POW(FP32(2.0), -m) * ROUND(POW(FP32(2.0), m) * src1[31:0], imm8[3:0])
+ tmp[31:0] := src1[31:0] - tmp[31:0]
+ IF IsInf(tmp[31:0])
+ tmp[31:0] := FP32(0.0)
+ FI
+ RETURN tmp[31:0]
+}
+IF k[0]
+ dst[31:0] := ReduceArgumentPS(b[31:0], imm8[7:0])
+ELSE
+ dst[31:0] := 0
+FI
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VREDUCESS" form="xmm {z}, xmm, xmm {sae}, imm8" xed="VREDUCESS_XMMf32_MASKmskw_XMMf32_XMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_reduce_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immtype="_MM_REDUCE"/>
+ <description>Extract the reduced argument of the lower single-precision (32-bit) floating-point element in "b" by the number of bits specified by "imm8", store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". [round_imm_note]</description>
+ <operation>
+DEFINE ReduceArgumentPS(src1[31:0], imm8[7:0]) {
+ m[31:0] := FP32(imm8[7:4]) // number of fraction bits after the binary point to be preserved
+ tmp[31:0] := POW(FP32(2.0), -m) * ROUND(POW(FP32(2.0), m) * src1[31:0], imm8[3:0])
+ tmp[31:0] := src1[31:0] - tmp[31:0]
+ IF IsInf(tmp[31:0])
+ tmp[31:0] := FP32(0.0)
+ FI
+ RETURN tmp[31:0]
+}
+dst[31:0] := ReduceArgumentPS(b[31:0], imm8[7:0])
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VREDUCESS" form="xmm, xmm, xmm, imm8" xed="VREDUCESS_XMMf32_MASKmskw_XMMf32_XMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_reduce_round_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immtype="_MM_REDUCE"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Extract the reduced argument of the lower single-precision (32-bit) floating-point element in "b" by the number of bits specified by "imm8", store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". [round_imm_note][sae_note]</description>
+ <operation>
+DEFINE ReduceArgumentPS(src1[31:0], imm8[7:0]) {
+ m[31:0] := FP32(imm8[7:4]) // number of fraction bits after the binary point to be preserved
+ tmp[31:0] := POW(FP32(2.0), -m) * ROUND(POW(FP32(2.0), m) * src1[31:0], imm8[3:0])
+ tmp[31:0] := src1[31:0] - tmp[31:0]
+ IF IsInf(tmp[31:0])
+ tmp[31:0] := FP32(0.0)
+ FI
+ RETURN tmp[31:0]
+}
+dst[31:0] := ReduceArgumentPS(b[31:0], imm8[7:0])
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VREDUCESS" form="xmm, xmm, xmm {sae}, imm8" xed="VREDUCESS_XMMf32_MASKmskw_XMMf32_XMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_xor_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Logical</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <parameter type="__m256d" varname="b" etype="FP64"/>
+ <description>Compute the bitwise XOR of packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := a[i+63:i] XOR b[i+63:i]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VXORPD" form="ymm {k}, ymm, ymm" xed="VXORPD_YMMu64_MASKmskw_YMMu64_YMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_xor_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Logical</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <parameter type="__m256d" varname="b" etype="FP64"/>
+ <description>Compute the bitwise XOR of packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := a[i+63:i] XOR b[i+63:i]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VXORPD" form="ymm {z}, ymm, ymm" xed="VXORPD_YMMu64_MASKmskw_YMMu64_YMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_xor_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Logical</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <description>Compute the bitwise XOR of packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := a[i+63:i] XOR b[i+63:i]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VXORPD" form="zmm {k}, zmm, zmm" xed="VXORPD_ZMMu64_MASKmskw_ZMMu64_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_xor_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Logical</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <description>Compute the bitwise XOR of packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := a[i+63:i] XOR b[i+63:i]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VXORPD" form="zmm {z}, zmm, zmm" xed="VXORPD_ZMMu64_MASKmskw_ZMMu64_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_xor_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Logical</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <description>Compute the bitwise XOR of packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ dst[i+63:i] := a[i+63:i] XOR b[i+63:i]
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VXORPD" form="zmm, zmm, zmm" xed="VXORPD_ZMMu64_MASKmskw_ZMMu64_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_xor_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Logical</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Compute the bitwise XOR of packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := a[i+63:i] XOR b[i+63:i]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VXORPD" form="xmm {k}, xmm, xmm" xed="VXORPD_XMMu64_MASKmskw_XMMu64_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_xor_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Logical</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Compute the bitwise XOR of packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := a[i+63:i] XOR b[i+63:i]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VXORPD" form="xmm {z}, xmm, xmm" xed="VXORPD_XMMu64_MASKmskw_XMMu64_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_xor_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Logical</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="__m256" varname="b" etype="FP32"/>
+ <description>Compute the bitwise XOR of packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := a[i+31:i] XOR b[i+31:i]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VXORPS" form="ymm {k}, ymm, ymm" xed="VXORPS_YMMu32_MASKmskw_YMMu32_YMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_xor_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Logical</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="__m256" varname="b" etype="FP32"/>
+ <description>Compute the bitwise XOR of packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := a[i+31:i] XOR b[i+31:i]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VXORPS" form="ymm {z}, ymm, ymm" xed="VXORPS_YMMu32_MASKmskw_YMMu32_YMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_xor_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Logical</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <description>Compute the bitwise XOR of packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := a[i+31:i] XOR b[i+31:i]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VXORPS" form="zmm {k}, zmm, zmm" xed="VXORPS_ZMMu32_MASKmskw_ZMMu32_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_xor_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Logical</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <description>Compute the bitwise XOR of packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := a[i+31:i] XOR b[i+31:i]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VXORPS" form="zmm {z}, zmm, zmm" xed="VXORPS_ZMMu32_MASKmskw_ZMMu32_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_xor_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Logical</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <description>Compute the bitwise XOR of packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := a[i+31:i] XOR b[i+31:i]
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VXORPS" form="zmm, zmm, zmm" xed="VXORPS_ZMMu32_MASKmskw_ZMMu32_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_xor_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Logical</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Compute the bitwise XOR of packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := a[i+31:i] XOR b[i+31:i]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VXORPS" form="xmm {k}, xmm, xmm" xed="VXORPS_XMMu32_MASKmskw_XMMu32_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_xor_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Logical</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Compute the bitwise XOR of packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := a[i+31:i] XOR b[i+31:i]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VXORPS" form="xmm {z}, xmm, xmm" xed="VXORPS_XMMu32_MASKmskw_XMMu32_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_kadd_mask8">
+ <type>Mask</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Mask</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__mmask8" varname="a" etype="MASK"/>
+ <parameter type="__mmask8" varname="b" etype="MASK"/>
+ <description>Add 8-bit masks in "a" and "b", and store the result in "k".</description>
+ <operation>
+k[7:0] := a[7:0] + b[7:0]
+k[MAX:8] := 0
+ </operation>
+ <instruction name="KADDB" form="k, k, k" xed="KADDB_MASKmskw_MASKmskw_MASKmskw_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_kadd_mask16">
+ <type>Mask</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Mask</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__mmask16" varname="a" etype="MASK"/>
+ <parameter type="__mmask16" varname="b" etype="MASK"/>
+ <description>Add 16-bit masks in "a" and "b", and store the result in "k".</description>
+ <operation>
+k[15:0] := a[15:0] + b[15:0]
+k[MAX:16] := 0
+ </operation>
+ <instruction name="KADDW" form="k, k, k" xed="KADDW_MASKmskw_MASKmskw_MASKmskw_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_kand_mask8">
+ <type>Mask</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Mask</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__mmask8" varname="a" etype="MASK"/>
+ <parameter type="__mmask8" varname="b" etype="MASK"/>
+ <description>Compute the bitwise AND of 8-bit masks "a" and "b", and store the result in "k".</description>
+ <operation>
+k[7:0] := a[7:0] AND b[7:0]
+k[MAX:8] := 0
+ </operation>
+ <instruction name="KANDB" form="k, k, k" xed="KANDB_MASKmskw_MASKmskw_MASKmskw_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_kandn_mask8">
+ <type>Mask</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Mask</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__mmask8" varname="a" etype="MASK"/>
+ <parameter type="__mmask8" varname="b" etype="MASK"/>
+ <description>Compute the bitwise NOT of 8-bit masks "a" and then AND with "b", and store the result in "k".</description>
+ <operation>
+k[7:0] := (NOT a[7:0]) AND b[7:0]
+k[MAX:8] := 0
+ </operation>
+ <instruction name="KANDNB" form="k, k, k" xed="KANDNB_MASKmskw_MASKmskw_MASKmskw_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_knot_mask8">
+ <type>Mask</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Mask</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__mmask8" varname="a" etype="MASK"/>
+ <description>Compute the bitwise NOT of 8-bit mask "a", and store the result in "k".</description>
+ <operation>
+k[7:0] := NOT a[7:0]
+k[MAX:8] := 0
+ </operation>
+ <instruction name="KNOTB" form="k, k" xed="KNOTB_MASKmskw_MASKmskw_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_kor_mask8">
+ <type>Mask</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Mask</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__mmask8" varname="a" etype="MASK"/>
+ <parameter type="__mmask8" varname="b" etype="MASK"/>
+ <description>Compute the bitwise OR of 8-bit masks "a" and "b", and store the result in "k".</description>
+ <operation>
+k[7:0] := a[7:0] OR b[7:0]
+k[MAX:8] := 0
+ </operation>
+ <instruction name="KORB" form="k, k, k" xed="KORB_MASKmskw_MASKmskw_MASKmskw_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_kxnor_mask8">
+ <type>Mask</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Mask</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__mmask8" varname="a" etype="MASK"/>
+ <parameter type="__mmask8" varname="b" etype="MASK"/>
+ <description>Compute the bitwise XNOR of 8-bit masks "a" and "b", and store the result in "k".</description>
+ <operation>
+k[7:0] := NOT (a[7:0] XOR b[7:0])
+k[MAX:8] := 0
+ </operation>
+ <instruction name="KXNORB" form="k, k, k" xed="KXNORB_MASKmskw_MASKmskw_MASKmskw_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_kxor_mask8">
+ <type>Mask</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Mask</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__mmask8" varname="a" etype="MASK"/>
+ <parameter type="__mmask8" varname="b" etype="MASK"/>
+ <description>Compute the bitwise XOR of 8-bit masks "a" and "b", and store the result in "k".</description>
+ <operation>
+k[7:0] := a[7:0] XOR b[7:0]
+k[MAX:8] := 0
+ </operation>
+ <instruction name="KXORB" form="k, k, k" xed="KXORB_MASKmskw_MASKmskw_MASKmskw_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_kshiftli_mask8">
+ <type>Mask</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Mask</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__mmask8" varname="a" etype="MASK"/>
+ <parameter type="unsigned int" varname="count" etype="IMM" immwidth="8"/>
+ <description>Shift the bits of 8-bit mask "a" left by "count" while shifting in zeros, and store the least significant 8 bits of the result in "k".</description>
+ <operation>
+k[MAX:0] := 0
+IF count[7:0] &lt;= 7
+ k[7:0] := a[7:0] &lt;&lt; count[7:0]
+FI
+ </operation>
+ <instruction name="KSHIFTLB" form="k, k, imm8" xed="KSHIFTLB_MASKmskw_MASKmskw_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_kshiftri_mask8">
+ <type>Mask</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Mask</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__mmask8" varname="a" etype="MASK"/>
+ <parameter type="unsigned int" varname="count" etype="IMM" immwidth="8"/>
+ <description>Shift the bits of 8-bit mask "a" right by "count" while shifting in zeros, and store the least significant 8 bits of the result in "k".</description>
+ <operation>
+k[MAX:0] := 0
+IF count[7:0] &lt;= 7
+ k[7:0] := a[7:0] &gt;&gt; count[7:0]
+FI
+ </operation>
+ <instruction name="KSHIFTRB" form="k, k, imm8" xed="KSHIFTRB_MASKmskw_MASKmskw_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_load_mask8">
+ <type>Mask</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Load</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__mmask8*" varname="mem_addr" etype="MASK" memwidth="8"/>
+ <description>Load 8-bit mask from memory into "k".</description>
+ <operation>
+k[7:0] := MEM[mem_addr+7:mem_addr]
+ </operation>
+ <instruction name="KMOVB" form="k, m8" xed="KMOVB_MASKmskw_MEMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_store_mask8">
+ <type>Mask</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="__mmask8*" varname="mem_addr" etype="MASK" memwidth="8"/>
+ <parameter type="__mmask8" varname="a" etype="MASK"/>
+ <description>Store 8-bit mask from "a" into memory.</description>
+ <operation>
+MEM[mem_addr+7:mem_addr] := a[7:0]
+ </operation>
+ <instruction name="KMOVB" form="m8, k" xed="KMOVB_MEMu8_MASKmskw_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_kortest_mask8_u8">
+ <type>Mask</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Mask</category>
+ <return type="unsigned char" varname="dst" etype="UI8"/>
+ <parameter type="__mmask8" varname="a" etype="MASK"/>
+ <parameter type="__mmask8" varname="b" etype="MASK"/>
+ <parameter type="unsigned char*" varname="all_ones" etype="UI8" memwidth="8"/>
+ <description>Compute the bitwise OR of 8-bit masks "a" and "b". If the result is all zeros, store 1 in "dst", otherwise store 0 in "dst". If the result is all ones, store 1 in "all_ones", otherwise store 0 in "all_ones".</description>
+ <operation>
+tmp[7:0] := a[7:0] OR b[7:0]
+IF tmp[7:0] == 0x0
+ dst := 1
+ELSE
+ dst := 0
+FI
+IF tmp[7:0] == 0xFF
+ MEM[all_ones+7:all_ones] := 1
+ELSE
+ MEM[all_ones+7:all_ones] := 0
+FI
+ </operation>
+ <instruction name="KORTESTB" form="k, k" xed="KORTESTB_MASKmskw_MASKmskw_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_kortestz_mask8_u8">
+ <type>Mask</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Mask</category>
+ <return type="unsigned char" varname="dst" etype="UI8"/>
+ <parameter type="__mmask8" varname="a" etype="MASK"/>
+ <parameter type="__mmask8" varname="b" etype="MASK"/>
+ <description>Compute the bitwise OR of 8-bit masks "a" and "b". If the result is all zeroes, store 1 in "dst", otherwise store 0 in "dst".</description>
+ <operation>
+tmp[7:0] := a[7:0] OR b[7:0]
+IF tmp[7:0] == 0x0
+ dst := 1
+ELSE
+ dst := 0
+FI
+ </operation>
+ <instruction name="KORTESTB" form="k, k" xed="KORTESTB_MASKmskw_MASKmskw_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_kortestc_mask8_u8">
+ <type>Mask</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Mask</category>
+ <return type="unsigned char" varname="dst" etype="UI8"/>
+ <parameter type="__mmask8" varname="a" etype="MASK"/>
+ <parameter type="__mmask8" varname="b" etype="MASK"/>
+ <description>Compute the bitwise OR of 8-bit masks "a" and "b". If the result is all ones, store 1 in "dst", otherwise store 0 in "dst".</description>
+ <operation>
+tmp[7:0] := a[7:0] OR b[7:0]
+IF tmp[7:0] == 0xFF
+ dst := 1
+ELSE
+ dst := 0
+FI
+ </operation>
+ <instruction name="KORTESTB" form="k, k" xed="KORTESTB_MASKmskw_MASKmskw_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_ktest_mask8_u8">
+ <type>Mask</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Mask</category>
+ <return type="unsigned char" varname="dst" etype="UI8"/>
+ <parameter type="__mmask8" varname="a" etype="MASK"/>
+ <parameter type="__mmask8" varname="b" etype="MASK"/>
+ <parameter type="unsigned char*" varname="and_not" etype="UI8" memwidth="8"/>
+ <description>Compute the bitwise AND of 8-bit masks "a" and "b", and if the result is all zeros, store 1 in "dst", otherwise store 0 in "dst". Compute the bitwise NOT of "a" and then AND with "b", if the result is all zeros, store 1 in "and_not", otherwise store 0 in "and_not".</description>
+ <operation>
+tmp1[7:0] := a[7:0] AND b[7:0]
+IF tmp1[7:0] == 0x0
+ dst := 1
+ELSE
+ dst := 0
+FI
+tmp2[7:0] := (NOT a[7:0]) AND b[7:0]
+IF tmp2[7:0] == 0x0
+ MEM[and_not+7:and_not] := 1
+ELSE
+ MEM[and_not+7:and_not] := 0
+FI
+ </operation>
+ <instruction name="KTESTB" form="k, k" xed="KTESTB_MASKmskw_MASKmskw_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_ktestz_mask8_u8">
+ <type>Mask</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Mask</category>
+ <return type="unsigned char" varname="dst" etype="UI8"/>
+ <parameter type="__mmask8" varname="a" etype="MASK"/>
+ <parameter type="__mmask8" varname="b" etype="MASK"/>
+ <description>Compute the bitwise AND of 8-bit masks "a" and "b", and if the result is all zeros, store 1 in "dst", otherwise store 0 in "dst".</description>
+ <operation>
+tmp[7:0] := a[7:0] AND b[7:0]
+IF tmp[7:0] == 0x0
+ dst := 1
+ELSE
+ dst := 0
+FI
+ </operation>
+ <instruction name="KTESTB" form="k, k" xed="KTESTB_MASKmskw_MASKmskw_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_ktestc_mask8_u8">
+ <type>Mask</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Mask</category>
+ <return type="unsigned char" varname="dst" etype="UI8"/>
+ <parameter type="__mmask8" varname="a" etype="MASK"/>
+ <parameter type="__mmask8" varname="b" etype="MASK"/>
+ <description>Compute the bitwise NOT of 8-bit mask "a" and then AND with "b", if the result is all zeros, store 1 in "dst", otherwise store 0 in "dst".</description>
+ <operation>
+tmp[7:0] := (NOT a[7:0]) AND b[7:0]
+IF tmp[7:0] == 0x0
+ dst := 1
+ELSE
+ dst := 0
+FI
+ </operation>
+ <instruction name="KTESTB" form="k, k" xed="KTESTB_MASKmskw_MASKmskw_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_ktest_mask16_u8">
+ <type>Mask</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Mask</category>
+ <return type="unsigned char" varname="dst" etype="UI8"/>
+ <parameter type="__mmask16" varname="a" etype="MASK"/>
+ <parameter type="__mmask16" varname="b" etype="MASK"/>
+ <parameter type="unsigned char*" varname="and_not" etype="UI8" memwidth="8"/>
+ <description>Compute the bitwise AND of 16-bit masks "a" and "b", and if the result is all zeros, store 1 in "dst", otherwise store 0 in "dst". Compute the bitwise NOT of "a" and then AND with "b", if the result is all zeros, store 1 in "and_not", otherwise store 0 in "and_not".</description>
+ <operation>
+tmp1[15:0] := a[15:0] AND b[15:0]
+IF tmp1[15:0] == 0x0
+ dst := 1
+ELSE
+ dst := 0
+FI
+tmp2[15:0] := (NOT a[15:0]) AND b[15:0]
+IF tmp2[15:0] == 0x0
+ MEM[and_not+7:and_not] := 1
+ELSE
+ MEM[and_not+7:and_not] := 0
+FI
+ </operation>
+ <instruction name="KTESTW" form="k, k" xed="KTESTW_MASKmskw_MASKmskw_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_ktestz_mask16_u8">
+ <type>Mask</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Mask</category>
+ <return type="unsigned char" varname="dst" etype="UI8"/>
+ <parameter type="__mmask16" varname="a" etype="MASK"/>
+ <parameter type="__mmask16" varname="b" etype="MASK"/>
+ <description>Compute the bitwise AND of 16-bit masks "a" and "b", and if the result is all zeros, store 1 in "dst", otherwise store 0 in "dst".</description>
+ <operation>
+tmp[15:0] := a[15:0] AND b[15:0]
+IF tmp[15:0] == 0x0
+ dst := 1
+ELSE
+ dst := 0
+FI
+ </operation>
+ <instruction name="KTESTW" form="k, k" xed="KTESTW_MASKmskw_MASKmskw_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_ktestc_mask16_u8">
+ <type>Mask</type>
+ <CPUID>AVX512DQ</CPUID>
+ <category>Mask</category>
+ <return type="unsigned char" varname="dst" etype="UI8"/>
+ <parameter type="__mmask16" varname="a" etype="MASK"/>
+ <parameter type="__mmask16" varname="b" etype="MASK"/>
+ <description>Compute the bitwise NOT of 16-bit mask "a" and then AND with "b", if the result is all zeros, store 1 in "dst", otherwise store 0 in "dst".</description>
+ <operation>
+tmp[15:0] := (NOT a[15:0]) AND b[15:0]
+IF tmp[15:0] == 0x0
+ dst := 1
+ELSE
+ dst := 0
+FI
+ </operation>
+ <instruction name="KTESTW" form="k, k" xed="KTESTW_MASKmskw_MASKmskw_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_cvtmask8_u32">
+ <CPUID>AVX512DQ</CPUID>
+ <category>Mask</category>
+ <return type="unsigned int" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="a" etype="MASK"/>
+ <description>Convert 8-bit mask "a" into an integer value, and store the result in "dst".</description>
+ <operation>
+dst := ZeroExtend32(a[7:0])
+ </operation>
+ <instruction name="KMOVB" form="r32, k" xed="KMOVB_GPR32u32_MASKmskw_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_cvtu32_mask8">
+ <CPUID>AVX512DQ</CPUID>
+ <category>Mask</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="unsigned int" varname="a" etype="UI8"/>
+ <description>Convert integer value "a" into an 8-bit mask, and store the result in "k".</description>
+ <operation>
+k := a[7:0]
+ </operation>
+ <instruction name="KMOVB" form="k, r32" xed="KMOVB_MASKmskw_GPR32u32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_exp2a23_round_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512ER</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Compute the approximate exponential value of 2 raised to the power of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". The maximum relative error for this approximation is less than 2^-23. [sae_note]</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := POW(FP32(2.0), a[i+31:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VEXP2PS" form="zmm, zmm {sae}" xed="VEXP2PS_ZMMf32_MASKmskw_ZMMf32_AVX512ER"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_exp2a23_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512ER</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Compute the approximate exponential value of 2 raised to the power of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". The maximum relative error for this approximation is less than 2^-23.</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := POW(FP32(2.0), a[i+31:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VEXP2PS" form="zmm, zmm" xed="VEXP2PS_ZMMf32_MASKmskw_ZMMf32_AVX512ER"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_exp2a23_round_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512ER</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Compute the approximate exponential value of 2 raised to the power of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-23. [sae_note]</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := POW(FP32(2.0), a[i+31:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VEXP2PS" form="zmm {k}, zmm {sae}" xed="VEXP2PS_ZMMf32_MASKmskw_ZMMf32_AVX512ER"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_exp2a23_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512ER</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Compute the approximate exponential value of 2 raised to the power of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-23.</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := POW(FP32(2.0), a[i+31:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VEXP2PS" form="zmm {k}, zmm" xed="VEXP2PS_ZMMf32_MASKmskw_ZMMf32_AVX512ER"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_exp2a23_round_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512ER</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Compute the approximate exponential value of 2 raised to the power of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-23. [sae_note]</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := POW(FP32(2.0), a[i+31:i])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VEXP2PS" form="zmm {z}, zmm {sae}" xed="VEXP2PS_ZMMf32_MASKmskw_ZMMf32_AVX512ER"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_exp2a23_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512ER</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Compute the approximate exponential value of 2 raised to the power of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-23.</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := POW(FP32(2.0), a[i+31:i])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VEXP2PS" form="zmm {z}, zmm" xed="VEXP2PS_ZMMf32_MASKmskw_ZMMf32_AVX512ER"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_exp2a23_round_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512ER</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Compute the approximate exponential value of 2 raised to the power of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst". The maximum relative error for this approximation is less than 2^-23. [sae_note]</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ dst[i+63:i] := POW(2.0, a[i+63:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VEXP2PD" form="zmm, zmm {sae}" xed="VEXP2PD_ZMMf64_MASKmskw_ZMMf64_AVX512ER"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_exp2a23_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512ER</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Compute the approximate exponential value of 2 raised to the power of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst". The maximum relative error for this approximation is less than 2^-23.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ dst[i+63:i] := POW(2.0, a[i+63:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VEXP2PD" form="zmm, zmm" xed="VEXP2PD_ZMMf64_MASKmskw_ZMMf64_AVX512ER"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_exp2a23_round_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512ER</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Compute the approximate exponential value of 2 raised to the power of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-23. [sae_note]</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := POW(2.0, a[i+63:i])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VEXP2PD" form="zmm {k}, zmm {sae}" xed="VEXP2PD_ZMMf64_MASKmskw_ZMMf64_AVX512ER"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_exp2a23_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512ER</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Compute the approximate exponential value of 2 raised to the power of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-23.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := POW(2.0, a[i+63:i])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VEXP2PD" form="zmm {k}, zmm" xed="VEXP2PD_ZMMf64_MASKmskw_ZMMf64_AVX512ER"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_exp2a23_round_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512ER</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Compute the approximate exponential value of 2 raised to the power of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-23. [sae_note]</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := POW(2.0, a[i+63:i])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VEXP2PD" form="zmm {z}, zmm {sae}" xed="VEXP2PD_ZMMf64_MASKmskw_ZMMf64_AVX512ER"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_exp2a23_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512ER</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Compute the approximate exponential value of 2 raised to the power of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-23.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := POW(2.0, a[i+63:i])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VEXP2PD" form="zmm {z}, zmm" xed="VEXP2PD_ZMMf64_MASKmskw_ZMMf64_AVX512ER"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_rcp28_round_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512ER</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Compute the approximate reciprocal of the lower double-precision (64-bit) floating-point element in "b", store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst". The maximum relative error for this approximation is less than 2^-28. [sae_note]</description>
+ <operation>
+dst[63:0] := (1.0 / b[63:0])
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VRCP28SD" form="xmm, xmm, xmm {sae}" xed="VRCP28SD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512ER"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_rcp28_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512ER</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Compute the approximate reciprocal of the lower double-precision (64-bit) floating-point element in "b", store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst". The maximum relative error for this approximation is less than 2^-28.</description>
+ <operation>
+dst[63:0] := (1.0 / b[63:0])
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VRCP28SD" form="xmm, xmm, xmm" xed="VRCP28SD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512ER"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_rcp28_round_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512ER</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Compute the approximate reciprocal of the lower double-precision (64-bit) floating-point element in "b", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". The maximum relative error for this approximation is less than 2^-28. [sae_note]</description>
+ <operation>
+IF k[0]
+ dst[63:0] := (1.0 / b[63:0])
+ELSE
+ dst[63:0] := src[63:0]
+FI
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VRCP28SD" form="xmm {k}, xmm, xmm {sae}" xed="VRCP28SD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512ER"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_rcp28_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512ER</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Compute the approximate reciprocal of the lower double-precision (64-bit) floating-point element in "b", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". The maximum relative error for this approximation is less than 2^-28.</description>
+ <operation>
+IF k[0]
+ dst[63:0] := (1.0 / b[63:0])
+ELSE
+ dst[63:0] := src[63:0]
+FI
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VRCP28SD" form="xmm {k}, xmm, xmm" xed="VRCP28SD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512ER"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_rcp28_round_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512ER</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Compute the approximate reciprocal of the lower double-precision (64-bit) floating-point element in "b", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". The maximum relative error for this approximation is less than 2^-28. [sae_note]</description>
+ <operation>
+IF k[0]
+ dst[63:0] := (1.0 / b[63:0])
+ELSE
+ dst[63:0] := 0
+FI
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VRCP28SD" form="xmm {z}, xmm, xmm {sae}" xed="VRCP28SD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512ER"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_rcp28_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512ER</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Compute the approximate reciprocal of the lower double-precision (64-bit) floating-point element in "b", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". The maximum relative error for this approximation is less than 2^-28.</description>
+ <operation>
+IF k[0]
+ dst[63:0] := (1.0 / b[63:0])
+ELSE
+ dst[63:0] := 0
+FI
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VRCP28SD" form="xmm {z}, xmm, xmm" xed="VRCP28SD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512ER"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_rcp28_round_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512ER</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Compute the approximate reciprocal of the lower single-precision (32-bit) floating-point element in "b", store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". The maximum relative error for this approximation is less than 2^-28. [sae_note]</description>
+ <operation>
+dst[31:0] := (1.0 / b[31:0])
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VRCP28SS" form="xmm, xmm, xmm {sae}" xed="VRCP28SS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512ER"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_rcp28_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512ER</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Compute the approximate reciprocal of the lower single-precision (32-bit) floating-point element in "b", store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". The maximum relative error for this approximation is less than 2^-28.</description>
+ <operation>
+dst[31:0] := (1.0 / b[31:0])
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VRCP28SS" form="xmm, xmm, xmm" xed="VRCP28SS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512ER"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_rcp28_round_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512ER</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Compute the approximate reciprocal of the lower single-precision (32-bit) floating-point element in "b", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". The maximum relative error for this approximation is less than 2^-28. [sae_note]</description>
+ <operation>
+IF k[0]
+ dst[31:0] := (1.0 / b[31:0])
+ELSE
+ dst[31:0] := src[31:0]
+FI
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VRCP28SS" form="xmm {k}, xmm, xmm {sae}" xed="VRCP28SS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512ER"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_rcp28_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512ER</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Compute the approximate reciprocal of the lower single-precision (32-bit) floating-point element in "b", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". The maximum relative error for this approximation is less than 2^-28.</description>
+ <operation>
+IF k[0]
+ dst[31:0] := (1.0 / b[31:0])
+ELSE
+ dst[31:0] := src[31:0]
+FI
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VRCP28SS" form="xmm {k}, xmm, xmm" xed="VRCP28SS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512ER"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_rcp28_round_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512ER</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Compute the approximate reciprocal of the lower single-precision (32-bit) floating-point element in "b", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". The maximum relative error for this approximation is less than 2^-28. [sae_note]</description>
+ <operation>
+IF k[0]
+ dst[31:0] := (1.0 / b[31:0])
+ELSE
+ dst[31:0] := 0
+FI
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VRCP28SS" form="xmm {z}, xmm, xmm {sae}" xed="VRCP28SS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512ER"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_rcp28_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512ER</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Compute the approximate reciprocal of the lower single-precision (32-bit) floating-point element in "b", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". The maximum relative error for this approximation is less than 2^-28.</description>
+ <operation>
+IF k[0]
+ dst[31:0] := (1.0 / b[31:0])
+ELSE
+ dst[31:0] := 0
+FI
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VRCP28SS" form="xmm {z}, xmm, xmm" xed="VRCP28SS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512ER"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_rcp28_round_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512ER</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Compute the approximate reciprocal of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". The maximum relative error for this approximation is less than 2^-28. [sae_note]</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := (1.0 / a[i+31:i])
+ENDFOR
+ </operation>
+ <instruction name="VRCP28PS" form="zmm, zmm {sae}" xed="VRCP28PS_ZMMf32_MASKmskw_ZMMf32_AVX512ER"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_rcp28_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512ER</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Compute the approximate reciprocal of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". The maximum relative error for this approximation is less than 2^-28.</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := (1.0 / a[i+31:i])
+ENDFOR
+ </operation>
+ <instruction name="VRCP28PS" form="zmm, zmm" xed="VRCP28PS_ZMMf32_MASKmskw_ZMMf32_AVX512ER"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_rcp28_round_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512ER</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Compute the approximate reciprocal of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-28. [sae_note]</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := (1.0 / a[i+31:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VRCP28PS" form="zmm {k}, zmm {sae}" xed="VRCP28PS_ZMMf32_MASKmskw_ZMMf32_AVX512ER"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_rcp28_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512ER</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Compute the approximate reciprocal of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-28.</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := (1.0 / a[i+31:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VRCP28PS" form="zmm {k}, zmm" xed="VRCP28PS_ZMMf32_MASKmskw_ZMMf32_AVX512ER"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_rcp28_round_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512ER</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Compute the approximate reciprocal of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-28. [sae_note]</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := (1.0 / a[i+31:i])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VRCP28PS" form="zmm {z}, zmm {sae}" xed="VRCP28PS_ZMMf32_MASKmskw_ZMMf32_AVX512ER"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_rcp28_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512ER</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Compute the approximate reciprocal of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-28.</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := (1.0 / a[i+31:i])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VRCP28PS" form="zmm {z}, zmm" xed="VRCP28PS_ZMMf32_MASKmskw_ZMMf32_AVX512ER"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_rcp28_round_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512ER</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Compute the approximate reciprocal of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst". The maximum relative error for this approximation is less than 2^-28. [sae_note]</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ dst[i+63:i] := (1.0 / a[i+63:i])
+ENDFOR
+ </operation>
+ <instruction name="VRCP28PD" form="zmm, zmm {sae}" xed="VRCP28PD_ZMMf64_MASKmskw_ZMMf64_AVX512ER"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_rcp28_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512ER</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Compute the approximate reciprocal of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst". The maximum relative error for this approximation is less than 2^-28.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ dst[i+63:i] := (1.0 / a[i+63:i])
+ENDFOR
+ </operation>
+ <instruction name="VRCP28PD" form="zmm, zmm" xed="VRCP28PD_ZMMf64_MASKmskw_ZMMf64_AVX512ER"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_rcp28_round_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512ER</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Compute the approximate reciprocal of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-28. [sae_note]</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := (1.0 / a[i+63:i])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VRCP28PD" form="zmm {k}, zmm {sae}" xed="VRCP28PD_ZMMf64_MASKmskw_ZMMf64_AVX512ER"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_rcp28_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512ER</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Compute the approximate reciprocal of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-28.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := (1.0 / a[i+63:i])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VRCP28PD" form="zmm {k}, zmm" xed="VRCP28PD_ZMMf64_MASKmskw_ZMMf64_AVX512ER"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_rcp28_round_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512ER</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Compute the approximate reciprocal of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-28. [sae_note]</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := (1.0 / a[i+63:i])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VRCP28PD" form="zmm {z}, zmm {sae}" xed="VRCP28PD_ZMMf64_MASKmskw_ZMMf64_AVX512ER"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_rcp28_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512ER</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Compute the approximate reciprocal of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-28.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := (1.0 / a[i+63:i])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VRCP28PD" form="zmm {z}, zmm" xed="VRCP28PD_ZMMf64_MASKmskw_ZMMf64_AVX512ER"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_rsqrt28_round_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512ER</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Compute the approximate reciprocal square root of the lower double-precision (64-bit) floating-point element in "b", store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst". The maximum relative error for this approximation is less than 2^-28. [sae_note]</description>
+ <operation>
+dst[63:0] := (1.0 / SQRT(b[63:0]))
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VRSQRT28SD" form="xmm, xmm, xmm {sae}" xed="VRSQRT28SD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512ER"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_rsqrt28_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512ER</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Compute the approximate reciprocal square root of the lower double-precision (64-bit) floating-point element in "b", store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst". The maximum relative error for this approximation is less than 2^-28.</description>
+ <operation>
+dst[63:0] := (1.0 / SQRT(b[63:0]))
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VRSQRT28SD" form="xmm, xmm, xmm" xed="VRSQRT28SD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512ER"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_rsqrt28_round_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512ER</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Compute the approximate reciprocal square root of the lower double-precision (64-bit) floating-point element in "b", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". The maximum relative error for this approximation is less than 2^-28. [sae_note]</description>
+ <operation>
+IF k[0]
+ dst[63:0] := (1.0 / SQRT(b[63:0]))
+ELSE
+ dst[63:0] := src[63:0]
+FI
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VRSQRT28SD" form="xmm {k}, xmm, xmm {sae}" xed="VRSQRT28SD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512ER"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_rsqrt28_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512ER</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Compute the approximate reciprocal square root of the lower double-precision (64-bit) floating-point element in "b", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". The maximum relative error for this approximation is less than 2^-28.</description>
+ <operation>
+IF k[0]
+ dst[63:0] := (1.0 / SQRT(b[63:0]))
+ELSE
+ dst[63:0] := src[63:0]
+FI
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VRSQRT28SD" form="xmm {k}, xmm, xmm" xed="VRSQRT28SD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512ER"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_rsqrt28_round_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512ER</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Compute the approximate reciprocal square root of the lower double-precision (64-bit) floating-point element in "b", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". The maximum relative error for this approximation is less than 2^-28. [sae_note]</description>
+ <operation>
+IF k[0]
+ dst[63:0] := (1.0 / SQRT(b[63:0]))
+ELSE
+ dst[63:0] := 0
+FI
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VRSQRT28SD" form="xmm {z}, xmm, xmm {sae}" xed="VRSQRT28SD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512ER"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_rsqrt28_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512ER</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Compute the approximate reciprocal square root of the lower double-precision (64-bit) floating-point element in "b", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". The maximum relative error for this approximation is less than 2^-28.</description>
+ <operation>
+IF k[0]
+ dst[63:0] := (1.0 / SQRT(b[63:0]))
+ELSE
+ dst[63:0] := 0
+FI
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VRSQRT28SD" form="xmm {z}, xmm, xmm" xed="VRSQRT28SD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512ER"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_rsqrt28_round_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512ER</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Compute the approximate reciprocal square root of the lower single-precision (32-bit) floating-point element in "b", store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". The maximum relative error for this approximation is less than 2^-28. [sae_note]</description>
+ <operation>
+dst[31:0] := (1.0 / SQRT(b[31:0]))
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VRSQRT28SS" form="xmm, xmm, xmm {sae}" xed="VRSQRT28SS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512ER"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_rsqrt28_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512ER</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Compute the approximate reciprocal square root of the lower single-precision (32-bit) floating-point element in "b", store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". The maximum relative error for this approximation is less than 2^-28.</description>
+ <operation>
+dst[31:0] := (1.0 / SQRT(b[31:0]))
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VRSQRT28SS" form="xmm, xmm, xmm" xed="VRSQRT28SS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512ER"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_rsqrt28_round_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512ER</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Compute the approximate reciprocal square root of the lower single-precision (32-bit) floating-point element in "b", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". The maximum relative error for this approximation is less than 2^-28. [sae_note]</description>
+ <operation>
+IF k[0]
+ dst[31:0] := (1.0 / SQRT(b[31:0]))
+ELSE
+ dst[31:0] := src[31:0]
+FI
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VRSQRT28SS" form="xmm {k}, xmm, xmm {sae}" xed="VRSQRT28SS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512ER"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_rsqrt28_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512ER</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Compute the approximate reciprocal square root of the lower single-precision (32-bit) floating-point element in "b", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". The maximum relative error for this approximation is less than 2^-28.</description>
+ <operation>
+IF k[0]
+ dst[31:0] := (1.0 / SQRT(b[31:0]))
+ELSE
+ dst[31:0] := src[31:0]
+FI
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VRSQRT28SS" form="xmm {k}, xmm, xmm" xed="VRSQRT28SS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512ER"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_rsqrt28_round_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512ER</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Compute the approximate reciprocal square root of the lower single-precision (32-bit) floating-point element in "b", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". The maximum relative error for this approximation is less than 2^-28. [sae_note]</description>
+ <operation>
+IF k[0]
+ dst[31:0] := (1.0 / SQRT(b[31:0]))
+ELSE
+ dst[31:0] := 0
+FI
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VRSQRT28SS" form="xmm {z}, xmm, xmm {sae}" xed="VRSQRT28SS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512ER"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_rsqrt28_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512ER</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Compute the approximate reciprocal square root of the lower single-precision (32-bit) floating-point element in "b", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". The maximum relative error for this approximation is less than 2^-28.</description>
+ <operation>
+IF k[0]
+ dst[31:0] := (1.0 / SQRT(b[31:0]))
+ELSE
+ dst[31:0] := 0
+FI
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VRSQRT28SS" form="xmm {z}, xmm, xmm" xed="VRSQRT28SS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512ER"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_rsqrt28_round_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512ER</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Compute the approximate reciprocal square root of packed single-precision (32-bit) floating-point elements in "a", store the results in "dst". The maximum relative error for this approximation is less than 2^-28. [sae_note]</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := (1.0 / SQRT(a[i+31:i]))
+ENDFOR
+ </operation>
+ <instruction name="VRSQRT28PS" form="zmm, zmm {sae}" xed="VRSQRT28PS_ZMMf32_MASKmskw_ZMMf32_AVX512ER"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_rsqrt28_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512ER</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Compute the approximate reciprocal square root of packed single-precision (32-bit) floating-point elements in "a", store the results in "dst". The maximum relative error for this approximation is less than 2^-28.</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := (1.0 / SQRT(a[i+31:i]))
+ENDFOR
+ </operation>
+ <instruction name="VRSQRT28PS" form="zmm, zmm" xed="VRSQRT28PS_ZMMf32_MASKmskw_ZMMf32_AVX512ER"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_rsqrt28_round_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512ER</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Compute the approximate reciprocal square root of packed single-precision (32-bit) floating-point elements in "a", store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-28. [sae_note]</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := (1.0 / SQRT(a[i+31:i]))
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VRSQRT28PS" form="zmm {k}, zmm {sae}" xed="VRSQRT28PS_ZMMf32_MASKmskw_ZMMf32_AVX512ER"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_rsqrt28_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512ER</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Compute the approximate reciprocal square root of packed single-precision (32-bit) floating-point elements in "a", store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-28.</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := (1.0 / SQRT(a[i+31:i]))
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VRSQRT28PS" form="zmm {k}, zmm" xed="VRSQRT28PS_ZMMf32_MASKmskw_ZMMf32_AVX512ER"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_rsqrt28_round_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512ER</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Compute the approximate reciprocal square root of packed single-precision (32-bit) floating-point elements in "a", store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-28. [sae_note]</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := (1.0 / SQRT(a[i+31:i]))
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VRSQRT28PS" form="zmm {z}, zmm {sae}" xed="VRSQRT28PS_ZMMf32_MASKmskw_ZMMf32_AVX512ER"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_rsqrt28_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512ER</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Compute the approximate reciprocal square root of packed single-precision (32-bit) floating-point elements in "a", store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-28.</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := (1.0 / SQRT(a[i+31:i]))
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VRSQRT28PS" form="zmm {z}, zmm" xed="VRSQRT28PS_ZMMf32_MASKmskw_ZMMf32_AVX512ER"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_rsqrt28_round_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512ER</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Compute the approximate reciprocal square root of packed double-precision (64-bit) floating-point elements in "a", store the results in "dst". The maximum relative error for this approximation is less than 2^-28. [sae_note]</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ dst[i+63:i] := (1.0 / SQRT(a[i+63:i]))
+ENDFOR
+ </operation>
+ <instruction name="VRSQRT28PD" form="zmm, zmm {sae}" xed="VRSQRT28PD_ZMMf64_MASKmskw_ZMMf64_AVX512ER"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_rsqrt28_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512ER</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Compute the approximate reciprocal square root of packed double-precision (64-bit) floating-point elements in "a", store the results in "dst". The maximum relative error for this approximation is less than 2^-28.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ dst[i+63:i] := (1.0 / SQRT(a[i+63:i]))
+ENDFOR
+ </operation>
+ <instruction name="VRSQRT28PD" form="zmm, zmm" xed="VRSQRT28PD_ZMMf64_MASKmskw_ZMMf64_AVX512ER"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_rsqrt28_round_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512ER</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Compute the approximate reciprocal square root of packed double-precision (64-bit) floating-point elements in "a", store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-28. [sae_note]</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := (1.0 / SQRT(a[i+63:i]))
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VRSQRT28PD" form="zmm {k}, zmm {sae}" xed="VRSQRT28PD_ZMMf64_MASKmskw_ZMMf64_AVX512ER"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_rsqrt28_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512ER</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Compute the approximate reciprocal square root of packed double-precision (64-bit) floating-point elements in "a", store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-28.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := (1.0 / SQRT(a[i+63:i]))
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VRSQRT28PD" form="zmm {k}, zmm" xed="VRSQRT28PD_ZMMf64_MASKmskw_ZMMf64_AVX512ER"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_rsqrt28_round_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512ER</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Compute the approximate reciprocal square root of packed double-precision (64-bit) floating-point elements in "a", store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-28. [sae_note]</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := (1.0 / SQRT(a[i+63:i]))
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VRSQRT28PD" form="zmm {z}, zmm {sae}" xed="VRSQRT28PD_ZMMf64_MASKmskw_ZMMf64_AVX512ER"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_rsqrt28_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512ER</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Compute the approximate reciprocal square root of packed double-precision (64-bit) floating-point elements in "a", store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-28.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := (1.0 / SQRT(a[i+63:i]))
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VRSQRT28PD" form="zmm {z}, zmm" xed="VRSQRT28PD_ZMMf64_MASKmskw_ZMMf64_AVX512ER"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_add_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <parameter type="__m256d" varname="b" etype="FP64"/>
+ <description>Add packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := a[i+63:i] + b[i+63:i]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VADDPD" form="ymm {k}, ymm, ymm" xed="VADDPD_YMMf64_MASKmskw_YMMf64_YMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_add_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <parameter type="__m256d" varname="b" etype="FP64"/>
+ <description>Add packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := a[i+63:i] + b[i+63:i]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VADDPD" form="ymm {z}, ymm, ymm" xed="VADDPD_YMMf64_MASKmskw_YMMf64_YMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_add_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Add packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := a[i+63:i] + b[i+63:i]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VADDPD" form="xmm {k}, xmm, xmm" xed="VADDPD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_add_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Add packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := a[i+63:i] + b[i+63:i]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VADDPD" form="xmm {z}, xmm, xmm" xed="VADDPD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_add_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="__m256" varname="b" etype="FP32"/>
+ <description>Add packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := a[i+31:i] + b[i+31:i]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VADDPS" form="ymm {k}, ymm, ymm" xed="VADDPS_YMMf32_MASKmskw_YMMf32_YMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_add_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="__m256" varname="b" etype="FP32"/>
+ <description>Add packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := a[i+31:i] + b[i+31:i]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VADDPS" form="ymm {z}, ymm, ymm" xed="VADDPS_YMMf32_MASKmskw_YMMf32_YMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_add_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Add packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := a[i+31:i] + b[i+31:i]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VADDPS" form="xmm {k}, xmm, xmm" xed="VADDPS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_add_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Add packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := a[i+31:i] + b[i+31:i]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VADDPS" form="xmm {z}, xmm, xmm" xed="VADDPS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_alignr_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="__m256i" varname="b" etype="UI32"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="3"/>
+ <description>Concatenate "a" and "b" into a 64-byte immediate result, shift the result right by "imm8" 32-bit elements, and store the low 32 bytes (8 elements) in "dst".</description>
+ <operation>
+temp[511:256] := a[255:0]
+temp[255:0] := b[255:0]
+temp[511:0] := temp[511:0] &gt;&gt; (32*imm8[2:0])
+dst[255:0] := temp[255:0]
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VALIGND" form="ymm, ymm, ymm, imm8" xed="VALIGND_YMMu32_MASKmskw_YMMu32_YMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_alignr_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="__m256i" varname="b" etype="UI32"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="3"/>
+ <description>Concatenate "a" and "b" into a 64-byte immediate result, shift the result right by "imm8" 32-bit elements, and store the low 32 bytes (8 elements) in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+temp[511:256] := a[255:0]
+temp[255:0] := b[255:0]
+temp[511:0] := temp[511:0] &gt;&gt; (32*imm8[2:0])
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := temp[i+31:i]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VALIGND" form="ymm {k}, ymm, ymm, imm8" xed="VALIGND_YMMu32_MASKmskw_YMMu32_YMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_alignr_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="__m256i" varname="b" etype="UI32"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="3"/>
+ <description>Concatenate "a" and "b" into a 64-byte immediate result, shift the result right by "imm8" 32-bit elements, and store the low 32 bytes (8 elements) in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+temp[511:256] := a[255:0]
+temp[255:0] := b[255:0]
+temp[511:0] := temp[511:0] &gt;&gt; (32*imm8[2:0])
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := temp[i+31:i]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VALIGND" form="ymm {z}, ymm, ymm, imm8" xed="VALIGND_YMMu32_MASKmskw_YMMu32_YMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_alignr_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="b" etype="UI32"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="2"/>
+ <description>Concatenate "a" and "b" into a 32-byte immediate result, shift the result right by "imm8" 32-bit elements, and store the low 16 bytes (4 elements) in "dst".</description>
+ <operation>
+temp[255:128] := a[127:0]
+temp[127:0] := b[127:0]
+temp[255:0] := temp[255:0] &gt;&gt; (32*imm8[1:0])
+dst[127:0] := temp[127:0]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VALIGND" form="xmm, xmm, xmm, imm8" xed="VALIGND_XMMu32_MASKmskw_XMMu32_XMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_alignr_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="b" etype="UI32"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="2"/>
+ <description>Concatenate "a" and "b" into a 32-byte immediate result, shift the result right by "imm8" 32-bit elements, and store the low 16 bytes (4 elements) in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+temp[255:128] := a[127:0]
+temp[127:0] := b[127:0]
+temp[255:0] := temp[255:0] &gt;&gt; (32*imm8[1:0])
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := temp[i+31:i]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VALIGND" form="xmm {k}, xmm, xmm, imm8" xed="VALIGND_XMMu32_MASKmskw_XMMu32_XMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_alignr_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="b" etype="UI32"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="2"/>
+ <description>Concatenate "a" and "b" into a 32-byte immediate result, shift the result right by "imm8" 32-bit elements, and store the low 16 bytes (4 elements) in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+temp[255:128] := a[127:0]
+temp[127:0] := b[127:0]
+temp[255:0] := temp[255:0] &gt;&gt; (32*imm8[1:0])
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := temp[i+31:i]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VALIGND" form="xmm {z}, xmm, xmm, imm8" xed="VALIGND_XMMu32_MASKmskw_XMMu32_XMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_alignr_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="__m256i" varname="b" etype="UI64"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="2"/>
+ <description>Concatenate "a" and "b" into a 64-byte immediate result, shift the result right by "imm8" 64-bit elements, and store the low 32 bytes (4 elements) in "dst".</description>
+ <operation>
+temp[511:256] := a[255:0]
+temp[255:0] := b[255:0]
+temp[511:0] := temp[511:0] &gt;&gt; (64*imm8[1:0])
+dst[255:0] := temp[255:0]
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VALIGNQ" form="ymm, ymm, ymm, imm8" xed="VALIGNQ_YMMu64_MASKmskw_YMMu64_YMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_alignr_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m256i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="__m256i" varname="b" etype="UI64"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="2"/>
+ <description>Concatenate "a" and "b" into a 64-byte immediate result, shift the result right by "imm8" 64-bit elements, and store the low 32 bytes (4 elements) in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+temp[511:256] := a[255:0]
+temp[255:0] := b[255:0]
+temp[511:0] := temp[511:0] &gt;&gt; (64*imm8[1:0])
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := temp[i+63:i]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VALIGNQ" form="ymm {k}, ymm, ymm, imm8" xed="VALIGNQ_YMMu64_MASKmskw_YMMu64_YMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_alignr_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="__m256i" varname="b" etype="UI64"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="2"/>
+ <description>Concatenate "a" and "b" into a 64-byte immediate result, shift the result right by "imm8" 64-bit elements, and store the low 32 bytes (4 elements) in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+temp[511:256] := a[255:0]
+temp[255:0] := b[255:0]
+temp[511:0] := temp[511:0] &gt;&gt; (64*imm8[1:0])
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := temp[i+63:i]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VALIGNQ" form="ymm {z}, ymm, ymm, imm8" xed="VALIGNQ_YMMu64_MASKmskw_YMMu64_YMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_alignr_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <parameter type="__m128i" varname="b" etype="UI64"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="1"/>
+ <description>Concatenate "a" and "b" into a 32-byte immediate result, shift the result right by "imm8" 64-bit elements, and store the low 16 bytes (2 elements) in "dst".</description>
+ <operation>
+temp[255:128] := a[127:0]
+temp[127:0] := b[127:0]
+temp[255:0] := temp[255:0] &gt;&gt; (64*imm8[0])
+dst[127:0] := temp[127:0]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VALIGNQ" form="xmm, xmm, xmm, imm8" xed="VALIGNQ_XMMu64_MASKmskw_XMMu64_XMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_alignr_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m128i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <parameter type="__m128i" varname="b" etype="UI64"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="1"/>
+ <description>Concatenate "a" and "b" into a 32-byte immediate result, shift the result right by "imm8" 64-bit elements, and store the low 16 bytes (2 elements) in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+temp[255:128] := a[127:0]
+temp[127:0] := b[127:0]
+temp[255:0] := temp[255:0] &gt;&gt; (64*imm8[0])
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := temp[i+63:i]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VALIGNQ" form="xmm {k}, xmm, xmm, imm8" xed="VALIGNQ_XMMu64_MASKmskw_XMMu64_XMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_alignr_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <parameter type="__m128i" varname="b" etype="UI64"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="1"/>
+ <description>Concatenate "a" and "b" into a 32-byte immediate result, shift the result right by "imm8" 64-bit elements, and store the low 16 bytes (2 elements) in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+temp[255:128] := a[127:0]
+temp[127:0] := b[127:0]
+temp[255:0] := temp[255:0] &gt;&gt; (64*imm8[0])
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := temp[i+63:i]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VALIGNQ" form="xmm {z}, xmm, xmm, imm8" xed="VALIGNQ_XMMu64_MASKmskw_XMMu64_XMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_blend_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <parameter type="__m256d" varname="b" etype="FP64"/>
+ <description>Blend packed double-precision (64-bit) floating-point elements from "a" and "b" using control mask "k", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := b[i+63:i]
+ ELSE
+ dst[i+63:i] := a[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VBLENDMPD" form="ymm {k}, ymm, ymm" xed="VBLENDMPD_YMMf64_MASKmskw_YMMf64_YMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_blend_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Blend packed double-precision (64-bit) floating-point elements from "a" and "b" using control mask "k", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := b[i+63:i]
+ ELSE
+ dst[i+63:i] := a[i+63:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VBLENDMPD" form="xmm {k}, xmm, xmm" xed="VBLENDMPD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_blend_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="__m256" varname="b" etype="FP32"/>
+ <description>Blend packed single-precision (32-bit) floating-point elements from "a" and "b" using control mask "k", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := b[i+31:i]
+ ELSE
+ dst[i+31:i] := a[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VBLENDMPS" form="ymm {k}, ymm, ymm" xed="VBLENDMPS_YMMf32_MASKmskw_YMMf32_YMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_blend_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Blend packed single-precision (32-bit) floating-point elements from "a" and "b" using control mask "k", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := b[i+31:i]
+ ELSE
+ dst[i+31:i] := a[i+31:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VBLENDMPS" form="xmm {k}, xmm, xmm" xed="VBLENDMPS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" sequence="TRUE" name="_mm256_broadcast_f32x4">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Broadcast the 4 packed single-precision (32-bit) floating-point elements from "a" to all elements of "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ n := (j % 4)*32
+ dst[i+31:i] := a[n+31:n]
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VBROADCASTF32X4" form="ymm, m128" xed="VBROADCASTF32X4_YMMf32_MASKmskw_MEMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" sequence="TRUE" name="_mm256_mask_broadcast_f32x4">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Broadcast the 4 packed single-precision (32-bit) floating-point elements from "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ n := (j % 4)*32
+ IF k[j]
+ dst[i+31:i] := a[n+31:n]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VBROADCASTF32X4" form="ymm {k}, m128" xed="VBROADCASTF32X4_YMMf32_MASKmskw_MEMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" sequence="TRUE" name="_mm256_maskz_broadcast_f32x4">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Broadcast the 4 packed single-precision (32-bit) floating-point elements from "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ n := (j % 4)*32
+ IF k[j]
+ dst[i+31:i] := a[n+31:n]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VBROADCASTF32X4" form="ymm {z}, m128" xed="VBROADCASTF32X4_YMMf32_MASKmskw_MEMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" sequence="TRUE" name="_mm256_broadcast_i32x4">
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <description>Broadcast the 4 packed 32-bit integers from "a" to all elements of "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ n := (j % 4)*32
+ dst[i+31:i] := a[n+31:n]
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VBROADCASTI32X4" form="ymm, m128" xed="VBROADCASTI32X4_YMMu32_MASKmskw_MEMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" sequence="TRUE" name="_mm256_mask_broadcast_i32x4">
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <description>Broadcast the 4 packed 32-bit integers from "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ n := (j % 4)*32
+ IF k[j]
+ dst[i+31:i] := a[n+31:n]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VBROADCASTI32X4" form="ymm {k}, m128" xed="VBROADCASTI32X4_YMMu32_MASKmskw_MEMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" sequence="TRUE" name="_mm256_maskz_broadcast_i32x4">
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <description>Broadcast the 4 packed 32-bit integers from "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ n := (j % 4)*32
+ IF k[j]
+ dst[i+31:i] := a[n+31:n]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VBROADCASTI32X4" form="ymm {z}, m128" xed="VBROADCASTI32X4_YMMu32_MASKmskw_MEMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_broadcastsd_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Broadcast the low double-precision (64-bit) floating-point element from "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := a[63:0]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VBROADCASTSD" form="ymm {k}, xmm" xed="VBROADCASTSD_YMMf64_MASKmskw_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_broadcastsd_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Broadcast the low double-precision (64-bit) floating-point element from "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := a[63:0]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VBROADCASTSD" form="ymm {z}, xmm" xed="VBROADCASTSD_YMMf64_MASKmskw_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_broadcastss_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Broadcast the low single-precision (32-bit) floating-point element from "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := a[31:0]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VBROADCASTSS" form="ymm {k}, xmm" xed="VBROADCASTSS_YMMf32_MASKmskw_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_broadcastss_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Broadcast the low single-precision (32-bit) floating-point element from "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := a[31:0]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VBROADCASTSS" form="ymm {z}, xmm" xed="VBROADCASTSS_YMMf32_MASKmskw_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_broadcastss_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Broadcast the low single-precision (32-bit) floating-point element from "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := a[31:0]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VBROADCASTSS" form="xmm {k}, xmm" xed="VBROADCASTSS_XMMf32_MASKmskw_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_broadcastss_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Broadcast the low single-precision (32-bit) floating-point element from "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := a[31:0]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VBROADCASTSS" form="xmm {z}, xmm" xed="VBROADCASTSS_XMMf32_MASKmskw_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_cmp_pd_mask">
+ <type>Floating Point</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <parameter type="__m256d" varname="b" etype="FP64"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="5"/>
+ <description>Compare packed double-precision (64-bit) floating-point elements in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k".</description>
+ <operation>CASE (imm8[4:0]) OF
+0: OP := _CMP_EQ_OQ
+1: OP := _CMP_LT_OS
+2: OP := _CMP_LE_OS
+3: OP := _CMP_UNORD_Q
+4: OP := _CMP_NEQ_UQ
+5: OP := _CMP_NLT_US
+6: OP := _CMP_NLE_US
+7: OP := _CMP_ORD_Q
+8: OP := _CMP_EQ_UQ
+9: OP := _CMP_NGE_US
+10: OP := _CMP_NGT_US
+11: OP := _CMP_FALSE_OQ
+12: OP := _CMP_NEQ_OQ
+13: OP := _CMP_GE_OS
+14: OP := _CMP_GT_OS
+15: OP := _CMP_TRUE_UQ
+16: OP := _CMP_EQ_OS
+17: OP := _CMP_LT_OQ
+18: OP := _CMP_LE_OQ
+19: OP := _CMP_UNORD_S
+20: OP := _CMP_NEQ_US
+21: OP := _CMP_NLT_UQ
+22: OP := _CMP_NLE_UQ
+23: OP := _CMP_ORD_S
+24: OP := _CMP_EQ_US
+25: OP := _CMP_NGE_UQ
+26: OP := _CMP_NGT_UQ
+27: OP := _CMP_FALSE_OS
+28: OP := _CMP_NEQ_OS
+29: OP := _CMP_GE_OQ
+30: OP := _CMP_GT_OQ
+31: OP := _CMP_TRUE_US
+ESAC
+FOR j := 0 to 3
+ i := j*64
+ k[j] := (a[i+63:i] OP b[i+63:i]) ? 1 : 0
+ENDFOR
+k[MAX:4] := 0
+ </operation>
+ <instruction name="VCMPPD" form="k, ymm, ymm, imm8" xed="VCMPPD_MASKmskw_MASKmskw_YMMf64_YMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cmp_pd_mask">
+ <type>Floating Point</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__mmask8" varname="k1" etype="MASK"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <parameter type="__m256d" varname="b" etype="FP64"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="5"/>
+ <description>Compare packed double-precision (64-bit) floating-point elements in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>CASE (imm8[4:0]) OF
+0: OP := _CMP_EQ_OQ
+1: OP := _CMP_LT_OS
+2: OP := _CMP_LE_OS
+3: OP := _CMP_UNORD_Q
+4: OP := _CMP_NEQ_UQ
+5: OP := _CMP_NLT_US
+6: OP := _CMP_NLE_US
+7: OP := _CMP_ORD_Q
+8: OP := _CMP_EQ_UQ
+9: OP := _CMP_NGE_US
+10: OP := _CMP_NGT_US
+11: OP := _CMP_FALSE_OQ
+12: OP := _CMP_NEQ_OQ
+13: OP := _CMP_GE_OS
+14: OP := _CMP_GT_OS
+15: OP := _CMP_TRUE_UQ
+16: OP := _CMP_EQ_OS
+17: OP := _CMP_LT_OQ
+18: OP := _CMP_LE_OQ
+19: OP := _CMP_UNORD_S
+20: OP := _CMP_NEQ_US
+21: OP := _CMP_NLT_UQ
+22: OP := _CMP_NLE_UQ
+23: OP := _CMP_ORD_S
+24: OP := _CMP_EQ_US
+25: OP := _CMP_NGE_UQ
+26: OP := _CMP_NGT_UQ
+27: OP := _CMP_FALSE_OS
+28: OP := _CMP_NEQ_OS
+29: OP := _CMP_GE_OQ
+30: OP := _CMP_GT_OQ
+31: OP := _CMP_TRUE_US
+ESAC
+FOR j := 0 to 3
+ i := j*64
+ IF k1[j]
+ k[j] := ( a[i+63:i] OP b[i+63:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:4] := 0
+ </operation>
+ <instruction name="VCMPPD" form="k {k}, ymm, ymm, imm8" xed="VCMPPD_MASKmskw_MASKmskw_YMMf64_YMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cmp_pd_mask">
+ <type>Floating Point</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="5"/>
+ <description>Compare packed double-precision (64-bit) floating-point elements in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k".</description>
+ <operation>CASE (imm8[4:0]) OF
+0: OP := _CMP_EQ_OQ
+1: OP := _CMP_LT_OS
+2: OP := _CMP_LE_OS
+3: OP := _CMP_UNORD_Q
+4: OP := _CMP_NEQ_UQ
+5: OP := _CMP_NLT_US
+6: OP := _CMP_NLE_US
+7: OP := _CMP_ORD_Q
+8: OP := _CMP_EQ_UQ
+9: OP := _CMP_NGE_US
+10: OP := _CMP_NGT_US
+11: OP := _CMP_FALSE_OQ
+12: OP := _CMP_NEQ_OQ
+13: OP := _CMP_GE_OS
+14: OP := _CMP_GT_OS
+15: OP := _CMP_TRUE_UQ
+16: OP := _CMP_EQ_OS
+17: OP := _CMP_LT_OQ
+18: OP := _CMP_LE_OQ
+19: OP := _CMP_UNORD_S
+20: OP := _CMP_NEQ_US
+21: OP := _CMP_NLT_UQ
+22: OP := _CMP_NLE_UQ
+23: OP := _CMP_ORD_S
+24: OP := _CMP_EQ_US
+25: OP := _CMP_NGE_UQ
+26: OP := _CMP_NGT_UQ
+27: OP := _CMP_FALSE_OS
+28: OP := _CMP_NEQ_OS
+29: OP := _CMP_GE_OQ
+30: OP := _CMP_GT_OQ
+31: OP := _CMP_TRUE_US
+ESAC
+FOR j := 0 to 1
+ i := j*64
+ k[j] := ( a[i+63:i] OP b[i+63:i] ) ? 1 : 0
+ENDFOR
+k[MAX:2] := 0
+ </operation>
+ <instruction name="VCMPPD" form="k, xmm, xmm, imm8" xed="VCMPPD_MASKmskw_MASKmskw_XMMf64_XMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cmp_pd_mask">
+ <type>Floating Point</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__mmask8" varname="k1" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="5"/>
+ <description>Compare packed double-precision (64-bit) floating-point elements in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>CASE (imm8[4:0]) OF
+0: OP := _CMP_EQ_OQ
+1: OP := _CMP_LT_OS
+2: OP := _CMP_LE_OS
+3: OP := _CMP_UNORD_Q
+4: OP := _CMP_NEQ_UQ
+5: OP := _CMP_NLT_US
+6: OP := _CMP_NLE_US
+7: OP := _CMP_ORD_Q
+8: OP := _CMP_EQ_UQ
+9: OP := _CMP_NGE_US
+10: OP := _CMP_NGT_US
+11: OP := _CMP_FALSE_OQ
+12: OP := _CMP_NEQ_OQ
+13: OP := _CMP_GE_OS
+14: OP := _CMP_GT_OS
+15: OP := _CMP_TRUE_UQ
+16: OP := _CMP_EQ_OS
+17: OP := _CMP_LT_OQ
+18: OP := _CMP_LE_OQ
+19: OP := _CMP_UNORD_S
+20: OP := _CMP_NEQ_US
+21: OP := _CMP_NLT_UQ
+22: OP := _CMP_NLE_UQ
+23: OP := _CMP_ORD_S
+24: OP := _CMP_EQ_US
+25: OP := _CMP_NGE_UQ
+26: OP := _CMP_NGT_UQ
+27: OP := _CMP_FALSE_OS
+28: OP := _CMP_NEQ_OS
+29: OP := _CMP_GE_OQ
+30: OP := _CMP_GT_OQ
+31: OP := _CMP_TRUE_US
+ESAC
+FOR j := 0 to 1
+ i := j*64
+ IF k1[j]
+ k[j] := ( a[i+63:i] OP b[i+63:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:2] := 0
+ </operation>
+ <instruction name="VCMPPD" form="k {k}, xmm, xmm, imm8" xed="VCMPPD_MASKmskw_MASKmskw_XMMf64_XMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_cmp_ps_mask">
+ <type>Floating Point</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="__m256" varname="b" etype="FP32"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="5"/>
+ <description>Compare packed single-precision (32-bit) floating-point elements in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k".</description>
+ <operation>CASE (imm8[4:0]) OF
+0: OP := _CMP_EQ_OQ
+1: OP := _CMP_LT_OS
+2: OP := _CMP_LE_OS
+3: OP := _CMP_UNORD_Q
+4: OP := _CMP_NEQ_UQ
+5: OP := _CMP_NLT_US
+6: OP := _CMP_NLE_US
+7: OP := _CMP_ORD_Q
+8: OP := _CMP_EQ_UQ
+9: OP := _CMP_NGE_US
+10: OP := _CMP_NGT_US
+11: OP := _CMP_FALSE_OQ
+12: OP := _CMP_NEQ_OQ
+13: OP := _CMP_GE_OS
+14: OP := _CMP_GT_OS
+15: OP := _CMP_TRUE_UQ
+16: OP := _CMP_EQ_OS
+17: OP := _CMP_LT_OQ
+18: OP := _CMP_LE_OQ
+19: OP := _CMP_UNORD_S
+20: OP := _CMP_NEQ_US
+21: OP := _CMP_NLT_UQ
+22: OP := _CMP_NLE_UQ
+23: OP := _CMP_ORD_S
+24: OP := _CMP_EQ_US
+25: OP := _CMP_NGE_UQ
+26: OP := _CMP_NGT_UQ
+27: OP := _CMP_FALSE_OS
+28: OP := _CMP_NEQ_OS
+29: OP := _CMP_GE_OQ
+30: OP := _CMP_GT_OQ
+31: OP := _CMP_TRUE_US
+ESAC
+FOR j := 0 to 7
+ i := j*32
+ k[j] := (a[i+31:i] OP b[i+31:i]) ? 1 : 0
+ENDFOR
+k[MAX:8] := 0
+ </operation>
+ <instruction name="VCMPPS" form="k, ymm, ymm, imm8" xed="VCMPPS_MASKmskw_MASKmskw_YMMf32_YMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cmp_ps_mask">
+ <type>Floating Point</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__mmask8" varname="k1" etype="MASK"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="__m256" varname="b" etype="FP32"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="5"/>
+ <description>Compare packed single-precision (32-bit) floating-point elements in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>CASE (imm8[4:0]) OF
+0: OP := _CMP_EQ_OQ
+1: OP := _CMP_LT_OS
+2: OP := _CMP_LE_OS
+3: OP := _CMP_UNORD_Q
+4: OP := _CMP_NEQ_UQ
+5: OP := _CMP_NLT_US
+6: OP := _CMP_NLE_US
+7: OP := _CMP_ORD_Q
+8: OP := _CMP_EQ_UQ
+9: OP := _CMP_NGE_US
+10: OP := _CMP_NGT_US
+11: OP := _CMP_FALSE_OQ
+12: OP := _CMP_NEQ_OQ
+13: OP := _CMP_GE_OS
+14: OP := _CMP_GT_OS
+15: OP := _CMP_TRUE_UQ
+16: OP := _CMP_EQ_OS
+17: OP := _CMP_LT_OQ
+18: OP := _CMP_LE_OQ
+19: OP := _CMP_UNORD_S
+20: OP := _CMP_NEQ_US
+21: OP := _CMP_NLT_UQ
+22: OP := _CMP_NLE_UQ
+23: OP := _CMP_ORD_S
+24: OP := _CMP_EQ_US
+25: OP := _CMP_NGE_UQ
+26: OP := _CMP_NGT_UQ
+27: OP := _CMP_FALSE_OS
+28: OP := _CMP_NEQ_OS
+29: OP := _CMP_GE_OQ
+30: OP := _CMP_GT_OQ
+31: OP := _CMP_TRUE_US
+ESAC
+FOR j := 0 to 7
+ i := j*32
+ IF k1[j]
+ k[j] := ( a[i+31:i] OP b[i+31:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:8] := 0
+ </operation>
+ <instruction name="VCMPPS" form="k {k}, ymm, ymm, imm8" xed="VCMPPS_MASKmskw_MASKmskw_YMMf32_YMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cmp_ps_mask">
+ <type>Floating Point</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="5"/>
+ <description>Compare packed single-precision (32-bit) floating-point elements in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k".</description>
+ <operation>CASE (imm8[4:0]) OF
+0: OP := _CMP_EQ_OQ
+1: OP := _CMP_LT_OS
+2: OP := _CMP_LE_OS
+3: OP := _CMP_UNORD_Q
+4: OP := _CMP_NEQ_UQ
+5: OP := _CMP_NLT_US
+6: OP := _CMP_NLE_US
+7: OP := _CMP_ORD_Q
+8: OP := _CMP_EQ_UQ
+9: OP := _CMP_NGE_US
+10: OP := _CMP_NGT_US
+11: OP := _CMP_FALSE_OQ
+12: OP := _CMP_NEQ_OQ
+13: OP := _CMP_GE_OS
+14: OP := _CMP_GT_OS
+15: OP := _CMP_TRUE_UQ
+16: OP := _CMP_EQ_OS
+17: OP := _CMP_LT_OQ
+18: OP := _CMP_LE_OQ
+19: OP := _CMP_UNORD_S
+20: OP := _CMP_NEQ_US
+21: OP := _CMP_NLT_UQ
+22: OP := _CMP_NLE_UQ
+23: OP := _CMP_ORD_S
+24: OP := _CMP_EQ_US
+25: OP := _CMP_NGE_UQ
+26: OP := _CMP_NGT_UQ
+27: OP := _CMP_FALSE_OS
+28: OP := _CMP_NEQ_OS
+29: OP := _CMP_GE_OQ
+30: OP := _CMP_GT_OQ
+31: OP := _CMP_TRUE_US
+ESAC
+FOR j := 0 to 3
+ i := j*32
+ k[j] := ( a[i+31:i] OP b[i+31:i] ) ? 1 : 0
+ENDFOR
+k[MAX:4] := 0
+ </operation>
+ <instruction name="VCMPPS" form="k, xmm, xmm, imm8" xed="VCMPPS_MASKmskw_MASKmskw_XMMf32_XMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cmp_ps_mask">
+ <type>Floating Point</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__mmask8" varname="k1" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="5"/>
+ <description>Compare packed single-precision (32-bit) floating-point elements in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>CASE (imm8[4:0]) OF
+0: OP := _CMP_EQ_OQ
+1: OP := _CMP_LT_OS
+2: OP := _CMP_LE_OS
+3: OP := _CMP_UNORD_Q
+4: OP := _CMP_NEQ_UQ
+5: OP := _CMP_NLT_US
+6: OP := _CMP_NLE_US
+7: OP := _CMP_ORD_Q
+8: OP := _CMP_EQ_UQ
+9: OP := _CMP_NGE_US
+10: OP := _CMP_NGT_US
+11: OP := _CMP_FALSE_OQ
+12: OP := _CMP_NEQ_OQ
+13: OP := _CMP_GE_OS
+14: OP := _CMP_GT_OS
+15: OP := _CMP_TRUE_UQ
+16: OP := _CMP_EQ_OS
+17: OP := _CMP_LT_OQ
+18: OP := _CMP_LE_OQ
+19: OP := _CMP_UNORD_S
+20: OP := _CMP_NEQ_US
+21: OP := _CMP_NLT_UQ
+22: OP := _CMP_NLE_UQ
+23: OP := _CMP_ORD_S
+24: OP := _CMP_EQ_US
+25: OP := _CMP_NGE_UQ
+26: OP := _CMP_NGT_UQ
+27: OP := _CMP_FALSE_OS
+28: OP := _CMP_NEQ_OS
+29: OP := _CMP_GE_OQ
+30: OP := _CMP_GT_OQ
+31: OP := _CMP_TRUE_US
+ESAC
+FOR j := 0 to 3
+ i := j*32
+ IF k1[j]
+ k[j] := ( a[i+31:i] OP b[i+31:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:4] := 0
+ </operation>
+ <instruction name="VCMPPS" form="k {k}, xmm, xmm, imm8" xed="VCMPPS_MASKmskw_MASKmskw_XMMf32_XMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_compress_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <description>Contiguously store the active double-precision (64-bit) floating-point elements in "a" (those with their respective bit set in writemask "k") to "dst", and pass through the remaining elements from "src".</description>
+ <operation>
+size := 64
+m := 0
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[m+size-1:m] := a[i+63:i]
+ m := m + size
+ FI
+ENDFOR
+dst[255:m] := src[255:m]
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VCOMPRESSPD" form="ymm {k}, ymm" xed="VCOMPRESSPD_YMMf64_MASKmskw_YMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_compressstoreu_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="base_addr" etype="FP64" memwidth="256"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <description>Contiguously store the active double-precision (64-bit) floating-point elements in "a" (those with their respective bit set in writemask "k") to unaligned memory at "base_addr".</description>
+ <operation>
+size := 64
+m := base_addr
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ MEM[m+size-1:m] := a[i+63:i]
+ m := m + size
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VCOMPRESSPD" form="m256 {k}, ymm" xed="VCOMPRESSPD_MEMf64_MASKmskw_YMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_compress_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <description>Contiguously store the active double-precision (64-bit) floating-point elements in "a" (those with their respective bit set in zeromask "k") to "dst", and set the remaining elements to zero.</description>
+ <operation>
+size := 64
+m := 0
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[m+size-1:m] := a[i+63:i]
+ m := m + size
+ FI
+ENDFOR
+dst[255:m] := 0
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VCOMPRESSPD" form="ymm {z}, ymm" xed="VCOMPRESSPD_YMMf64_MASKmskw_YMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_compress_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Contiguously store the active double-precision (64-bit) floating-point elements in "a" (those with their respective bit set in writemask "k") to "dst", and pass through the remaining elements from "src".</description>
+ <operation>
+size := 64
+m := 0
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[m+size-1:m] := a[i+63:i]
+ m := m + size
+ FI
+ENDFOR
+dst[127:m] := src[127:m]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VCOMPRESSPD" form="xmm {k}, xmm" xed="VCOMPRESSPD_XMMf64_MASKmskw_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_compressstoreu_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="base_addr" etype="FP64" memwidth="128"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Contiguously store the active double-precision (64-bit) floating-point elements in "a" (those with their respective bit set in writemask "k") to unaligned memory at "base_addr".</description>
+ <operation>
+size := 64
+m := base_addr
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ MEM[m+size-1:m] := a[i+63:i]
+ m := m + size
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VCOMPRESSPD" form="m128 {k}, xmm" xed="VCOMPRESSPD_MEMf64_MASKmskw_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_compress_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Contiguously store the active double-precision (64-bit) floating-point elements in "a" (those with their respective bit set in zeromask "k") to "dst", and set the remaining elements to zero.</description>
+ <operation>
+size := 64
+m := 0
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[m+size-1:m] := a[i+63:i]
+ m := m + size
+ FI
+ENDFOR
+dst[127:m] := 0
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VCOMPRESSPD" form="xmm {z}, xmm" xed="VCOMPRESSPD_XMMf64_MASKmskw_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_compress_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <description>Contiguously store the active single-precision (32-bit) floating-point elements in "a" (those with their respective bit set in writemask "k") to "dst", and pass through the remaining elements from "src".</description>
+ <operation>
+size := 32
+m := 0
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[m+size-1:m] := a[i+31:i]
+ m := m + size
+ FI
+ENDFOR
+dst[255:m] := src[255:m]
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VCOMPRESSPS" form="ymm {k}, ymm" xed="VCOMPRESSPS_YMMf32_MASKmskw_YMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_compressstoreu_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="base_addr" etype="FP32" memwidth="256"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <description>Contiguously store the active single-precision (32-bit) floating-point elements in "a" (those with their respective bit set in writemask "k") to unaligned memory at "base_addr".</description>
+ <operation>
+size := 32
+m := base_addr
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ MEM[m+size-1:m] := a[i+31:i]
+ m := m + size
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VCOMPRESSPS" form="m256 {k}, ymm" xed="VCOMPRESSPS_MEMf32_MASKmskw_YMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_compress_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <description>Contiguously store the active single-precision (32-bit) floating-point elements in "a" (those with their respective bit set in zeromask "k") to "dst", and set the remaining elements to zero.</description>
+ <operation>
+size := 32
+m := 0
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[m+size-1:m] := a[i+31:i]
+ m := m + size
+ FI
+ENDFOR
+dst[255:m] := 0
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VCOMPRESSPS" form="ymm {z}, ymm" xed="VCOMPRESSPS_YMMf32_MASKmskw_YMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_compress_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Contiguously store the active single-precision (32-bit) floating-point elements in "a" (those with their respective bit set in writemask "k") to "dst", and pass through the remaining elements from "src".</description>
+ <operation>
+size := 32
+m := 0
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[m+size-1:m] := a[i+31:i]
+ m := m + size
+ FI
+ENDFOR
+dst[127:m] := src[127:m]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VCOMPRESSPS" form="xmm {k}, xmm" xed="VCOMPRESSPS_XMMf32_MASKmskw_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_compressstoreu_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="base_addr" etype="FP32" memwidth="128"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Contiguously store the active single-precision (32-bit) floating-point elements in "a" (those with their respective bit set in writemask "k") to unaligned memory at "base_addr".</description>
+ <operation>
+size := 32
+m := base_addr
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ MEM[m+size-1:m] := a[i+31:i]
+ m := m + size
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VCOMPRESSPS" form="m128 {k}, xmm" xed="VCOMPRESSPS_MEMf32_MASKmskw_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_compress_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Contiguously store the active single-precision (32-bit) floating-point elements in "a" (those with their respective bit set in zeromask "k") to "dst", and set the remaining elements to zero.</description>
+ <operation>
+size := 32
+m := 0
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[m+size-1:m] := a[i+31:i]
+ m := m + size
+ FI
+ENDFOR
+dst[127:m] := 0
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VCOMPRESSPS" form="xmm {z}, xmm" xed="VCOMPRESSPS_XMMf32_MASKmskw_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cvtepi32_pd">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI32"/>
+ <description>Convert packed signed 32-bit integers in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ m := j*64
+ IF k[j]
+ dst[m+63:m] := Convert_Int32_To_FP64(a[i+31:i])
+ ELSE
+ dst[m+63:m] := src[m+63:m]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VCVTDQ2PD" form="ymm {k}, xmm" xed="VCVTDQ2PD_YMMf64_MASKmskw_XMMi32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_cvtepi32_pd">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI32"/>
+ <description>Convert packed signed 32-bit integers in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ m := j*64
+ IF k[j]
+ dst[m+63:m] := Convert_Int32_To_FP64(a[i+31:i])
+ ELSE
+ dst[m+63:m] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VCVTDQ2PD" form="ymm {z}, xmm" xed="VCVTDQ2PD_YMMf64_MASKmskw_XMMi32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cvtepi32_pd">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI32"/>
+ <description>Convert packed signed 32-bit integers in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*32
+ m := j*64
+ IF k[j]
+ dst[m+63:m] := Convert_Int32_To_FP64(a[i+31:i])
+ ELSE
+ dst[m+63:m] := src[m+63:m]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VCVTDQ2PD" form="xmm {k}, xmm" xed="VCVTDQ2PD_XMMf64_MASKmskw_XMMi32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_cvtepi32_pd">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI32"/>
+ <description>Convert packed signed 32-bit integers in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*32
+ m := j*64
+ IF k[j]
+ dst[m+63:m] := Convert_Int32_To_FP64(a[i+31:i])
+ ELSE
+ dst[m+63:m] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VCVTDQ2PD" form="xmm {z}, xmm" xed="VCVTDQ2PD_XMMf64_MASKmskw_XMMi32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cvtepi32_ps">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI32"/>
+ <description>Convert packed signed 32-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := Convert_Int32_To_FP32(a[i+31:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VCVTDQ2PS" form="ymm {k}, ymm" xed="VCVTDQ2PS_YMMf32_MASKmskw_YMMi32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_cvtepi32_ps">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI32"/>
+ <description>Convert packed signed 32-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := 32*j
+ IF k[j]
+ dst[i+31:i] := Convert_Int32_To_FP32(a[i+31:i])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VCVTDQ2PS" form="ymm {z}, ymm" xed="VCVTDQ2PS_YMMf32_MASKmskw_YMMi32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cvtepi32_ps">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI32"/>
+ <description>Convert packed signed 32-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := Convert_Int32_To_FP32(a[i+31:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VCVTDQ2PS" form="xmm {k}, xmm" xed="VCVTDQ2PS_XMMf32_MASKmskw_XMMi32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_cvtepi32_ps">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI32"/>
+ <description>Convert packed signed 32-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := 32*j
+ IF k[j]
+ dst[i+31:i] := Convert_Int32_To_FP32(a[i+31:i])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VCVTDQ2PS" form="xmm {z}, xmm" xed="VCVTDQ2PS_XMMf32_MASKmskw_XMMi32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cvtpd_epi32">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <description>Convert packed double-precision (64-bit) floating-point elements in "a" to packed 32-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ l := j*64
+ IF k[j]
+ dst[i+31:i] := Convert_FP64_To_Int32(a[l+63:l])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VCVTPD2DQ" form="xmm {k}, ymm" xed="VCVTPD2DQ_XMMi32_MASKmskw_YMMf64_AVX512_VL256"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_cvtpd_epi32">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <description>Convert packed double-precision (64-bit) floating-point elements in "a" to packed 32-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := 32*j
+ l := 64*j
+ IF k[j]
+ dst[i+31:i] := Convert_FP64_To_Int32(a[l+63:l])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VCVTPD2DQ" form="xmm {z}, ymm" xed="VCVTPD2DQ_XMMi32_MASKmskw_YMMf64_AVX512_VL256"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cvtpd_epi32">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Convert packed double-precision (64-bit) floating-point elements in "a" to packed 32-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*32
+ l := j*64
+ IF k[j]
+ dst[i+31:i] := Convert_FP64_To_Int32(a[l+63:l])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:64] := 0
+ </operation>
+ <instruction name="VCVTPD2DQ" form="xmm {k}, xmm" xed="VCVTPD2DQ_XMMi32_MASKmskw_XMMf64_AVX512_VL128"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_cvtpd_epi32">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Convert packed double-precision (64-bit) floating-point elements in "a" to packed 32-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := 32*j
+ l := 64*j
+ IF k[j]
+ dst[i+31:i] := Convert_FP64_To_Int32(a[l+63:l])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:64] := 0
+ </operation>
+ <instruction name="VCVTPD2DQ" form="xmm {z}, xmm" xed="VCVTPD2DQ_XMMi32_MASKmskw_XMMf64_AVX512_VL128"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cvtpd_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <description>Convert packed double-precision (64-bit) floating-point elements in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := 32*j
+ l := 64*j
+ IF k[j]
+ dst[i+31:i] := Convert_FP64_To_FP32(a[l+63:l])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VCVTPD2PS" form="xmm {k}, ymm" xed="VCVTPD2PS_XMMf32_MASKmskw_YMMf64_AVX512_VL256"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_cvtpd_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <description>Convert packed double-precision (64-bit) floating-point elements in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ l := j*64
+ IF k[j]
+ dst[i+31:i] := Convert_FP64_To_FP32(a[l+63:l])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VCVTPD2PS" form="xmm {z}, ymm" xed="VCVTPD2PS_XMMf32_MASKmskw_YMMf64_AVX512_VL256"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cvtpd_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Convert packed double-precision (64-bit) floating-point elements in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := 32*j
+ l := 64*j
+ IF k[j]
+ dst[i+31:i] := Convert_FP64_To_FP32(a[l+63:l])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:64] := 0
+ </operation>
+ <instruction name="VCVTPD2PS" form="xmm {k}, xmm" xed="VCVTPD2PS_XMMf32_MASKmskw_XMMf64_AVX512_VL128"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_cvtpd_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Convert packed double-precision (64-bit) floating-point elements in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*32
+ l := j*64
+ IF k[j]
+ dst[i+31:i] := Convert_FP64_To_FP32(a[l+63:l])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:64] := 0
+ </operation>
+ <instruction name="VCVTPD2PS" form="xmm {z}, xmm" xed="VCVTPD2PS_XMMf32_MASKmskw_XMMf64_AVX512_VL128"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_cvtpd_epu32">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <description>Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 32-bit integers, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := 32*j
+ k := 64*j
+ dst[i+31:i] := Convert_FP64_To_UInt32(a[k+63:k])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VCVTPD2UDQ" form="xmm, ymm" xed="VCVTPD2UDQ_XMMu32_MASKmskw_YMMf64_AVX512_VL256"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cvtpd_epu32">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <description>Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 32-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ l := j*64
+ IF k[j]
+ dst[i+31:i] := Convert_FP64_To_UInt32(a[l+63:l])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VCVTPD2UDQ" form="xmm {k}, ymm" xed="VCVTPD2UDQ_XMMu32_MASKmskw_YMMf64_AVX512_VL256"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_cvtpd_epu32">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <description>Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 32-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := 32*j
+ l := 64*j
+ IF k[j]
+ dst[i+31:i] := Convert_FP64_To_UInt32(a[l+63:l])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VCVTPD2UDQ" form="xmm {z}, ymm" xed="VCVTPD2UDQ_XMMu32_MASKmskw_YMMf64_AVX512_VL256"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cvtpd_epu32">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 32-bit integers, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := 32*j
+ k := 64*j
+ dst[i+31:i] := Convert_FP64_To_UInt32(a[k+63:k])
+ENDFOR
+dst[MAX:64] := 0
+ </operation>
+ <instruction name="VCVTPD2UDQ" form="xmm, xmm" xed="VCVTPD2UDQ_XMMu32_MASKmskw_XMMf64_AVX512_VL128"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cvtpd_epu32">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 32-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*32
+ l := j*64
+ IF k[j]
+ dst[i+31:i] := Convert_FP64_To_UInt32(a[l+63:l])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:64] := 0
+ </operation>
+ <instruction name="VCVTPD2UDQ" form="xmm {k}, xmm" xed="VCVTPD2UDQ_XMMu32_MASKmskw_XMMf64_AVX512_VL128"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_cvtpd_epu32">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 32-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := 32*j
+ l := 64*j
+ IF k[j]
+ dst[i+31:i] := Convert_FP64_To_UInt32(a[l+63:l])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:64] := 0
+ </operation>
+ <instruction name="VCVTPD2UDQ" form="xmm {z}, xmm" xed="VCVTPD2UDQ_XMMu32_MASKmskw_XMMf64_AVX512_VL128"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cvtph_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="FP16"/>
+ <description>Convert packed half-precision (16-bit) floating-point elements in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ m := j*16
+ IF k[j]
+ dst[i+31:i] := Convert_FP16_To_FP32(a[m+15:m])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VCVTPH2PS" form="ymm {k}, xmm" xed="VCVTPH2PS_YMMf32_MASKmskw_XMMf16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_cvtph_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="FP16"/>
+ <description>Convert packed half-precision (16-bit) floating-point elements in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ m := j*16
+ IF k[j]
+ dst[i+31:i] := Convert_FP16_To_FP32(a[m+15:m])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VCVTPH2PS" form="ymm {z}, xmm" xed="VCVTPH2PS_YMMf32_MASKmskw_XMMf16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cvtph_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="FP16"/>
+ <description>Convert packed half-precision (16-bit) floating-point elements in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ m := j*16
+ IF k[j]
+ dst[i+31:i] := Convert_FP16_To_FP32(a[m+15:m])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VCVTPH2PS" form="xmm {k}, xmm" xed="VCVTPH2PS_XMMf32_MASKmskw_XMMf16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_cvtph_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="FP16"/>
+ <description>Convert packed half-precision (16-bit) floating-point elements in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ m := j*16
+ IF k[j]
+ dst[i+31:i] := Convert_FP16_To_FP32(a[m+15:m])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VCVTPH2PS" form="xmm {z}, xmm" xed="VCVTPH2PS_XMMf32_MASKmskw_XMMf16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cvtps_epi32">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed 32-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := Convert_FP32_To_Int32(a[i+31:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VCVTPS2DQ" form="ymm {k}, ymm" xed="VCVTPS2DQ_YMMi32_MASKmskw_YMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_cvtps_epi32">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed 32-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := 32*j
+ IF k[j]
+ dst[i+31:i] := Convert_FP32_To_Int32(a[i+31:i])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VCVTPS2DQ" form="ymm {z}, ymm" xed="VCVTPS2DQ_YMMi32_MASKmskw_YMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cvtps_epi32">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed 32-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := Convert_FP32_To_Int32(a[i+31:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VCVTPS2DQ" form="xmm {k}, xmm" xed="VCVTPS2DQ_XMMi32_MASKmskw_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_cvtps_epi32">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed 32-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := 32*j
+ IF k[j]
+ dst[i+31:i] := Convert_FP32_To_Int32(a[i+31:i])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VCVTPS2DQ" form="xmm {z}, xmm" xed="VCVTPS2DQ_XMMi32_MASKmskw_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cvt_roundps_ph">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__m128i" varname="src" etype="UI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immtype="_MM_ROUND_MODE"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). [round_imm_note]</description>
+ <operation>
+FOR j := 0 to 7
+ i := 16*j
+ l := 32*j
+ IF k[j]
+ dst[i+15:i] := Convert_FP32_To_FP16(a[l+31:l])
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VCVTPS2PH" form="xmm {k}, ymm, imm8" xed="VCVTPS2PH_XMMf16_MASKmskw_YMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cvtps_ph">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__m128i" varname="src" etype="UI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immtype="_MM_ROUND_MODE"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). [round_imm_note]</description>
+ <operation>
+FOR j := 0 to 7
+ i := 16*j
+ l := 32*j
+ IF k[j]
+ dst[i+15:i] := Convert_FP32_To_FP16(a[l+31:l])
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VCVTPS2PH" form="xmm {k}, ymm, imm8" xed="VCVTPS2PH_XMMf16_MASKmskw_YMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_cvt_roundps_ph">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immtype="_MM_ROUND_MODE"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). [round_imm_note]</description>
+ <operation>
+FOR j := 0 to 7
+ i := 16*j
+ l := 32*j
+ IF k[j]
+ dst[i+15:i] := Convert_FP32_To_FP16(a[l+31:l])
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VCVTPS2PH" form="xmm {z}, ymm, imm8" xed="VCVTPS2PH_XMMf16_MASKmskw_YMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_cvtps_ph">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immtype="_MM_ROUND_MODE"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). [round_imm_note]</description>
+ <operation>
+FOR j := 0 to 7
+ i := 16*j
+ l := 32*j
+ IF k[j]
+ dst[i+15:i] := Convert_FP32_To_FP16(a[l+31:l])
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VCVTPS2PH" form="xmm {z}, ymm, imm8" xed="VCVTPS2PH_XMMf16_MASKmskw_YMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cvt_roundps_ph">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__m128i" varname="src" etype="UI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immtype="_MM_ROUND_MODE"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). [round_imm_note]</description>
+ <operation>
+FOR j := 0 to 3
+ i := 16*j
+ l := 32*j
+ IF k[j]
+ dst[i+15:i] := Convert_FP32_To_FP16(a[l+31:l])
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:64] := 0
+ </operation>
+ <instruction name="VCVTPS2PH" form="xmm {k}, xmm, imm8" xed="VCVTPS2PH_XMMf16_MASKmskw_XMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cvtps_ph">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__m128i" varname="src" etype="UI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immtype="_MM_ROUND_MODE"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). [round_imm_note]</description>
+ <operation>
+FOR j := 0 to 3
+ i := 16*j
+ l := 32*j
+ IF k[j]
+ dst[i+15:i] := Convert_FP32_To_FP16(a[l+31:l])
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:64] := 0
+ </operation>
+ <instruction name="VCVTPS2PH" form="xmm {k}, xmm, imm8" xed="VCVTPS2PH_XMMf16_MASKmskw_XMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_cvt_roundps_ph">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immtype="_MM_ROUND_MODE"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). [round_imm_note]</description>
+ <operation>
+FOR j := 0 to 3
+ i := 16*j
+ l := 32*j
+ IF k[j]
+ dst[i+15:i] := Convert_FP32_To_FP16(a[l+31:l])
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:64] := 0
+ </operation>
+ <instruction name="VCVTPS2PH" form="xmm {z}, xmm, imm8" xed="VCVTPS2PH_XMMf16_MASKmskw_XMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_cvtps_ph">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immtype="_MM_ROUND_MODE"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). [round_imm_note]</description>
+ <operation>
+FOR j := 0 to 3
+ i := 16*j
+ l := 32*j
+ IF k[j]
+ dst[i+15:i] := Convert_FP32_To_FP16(a[l+31:l])
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:64] := 0
+ </operation>
+ <instruction name="VCVTPS2PH" form="xmm {z}, xmm, imm8" xed="VCVTPS2PH_XMMf16_MASKmskw_XMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_cvtps_epu32">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 32-bit integers, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := 32*j
+ dst[i+31:i] := Convert_FP32_To_UInt32(a[i+31:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VCVTPS2UDQ" form="ymm, ymm" xed="VCVTPS2UDQ_YMMu32_MASKmskw_YMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cvtps_epu32">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 32-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := 32*j
+ IF k[j]
+ dst[i+31:i] := Convert_FP32_To_UInt32(a[i+31:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VCVTPS2UDQ" form="ymm {k}, ymm" xed="VCVTPS2UDQ_YMMu32_MASKmskw_YMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_cvtps_epu32">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 32-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := 32*j
+ IF k[j]
+ dst[i+31:i] := Convert_FP32_To_UInt32(a[i+31:i])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VCVTPS2UDQ" form="ymm {z}, ymm" xed="VCVTPS2UDQ_YMMu32_MASKmskw_YMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cvtps_epu32">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 32-bit integers, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := 32*j
+ dst[i+31:i] := Convert_FP32_To_UInt32(a[i+31:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VCVTPS2UDQ" form="xmm, xmm" xed="VCVTPS2UDQ_XMMu32_MASKmskw_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cvtps_epu32">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 32-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := 32*j
+ IF k[j]
+ dst[i+31:i] := Convert_FP32_To_UInt32(a[i+31:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VCVTPS2UDQ" form="xmm {k}, xmm" xed="VCVTPS2UDQ_XMMu32_MASKmskw_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_cvtps_epu32">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 32-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := 32*j
+ IF k[j]
+ dst[i+31:i] := Convert_FP32_To_UInt32(a[i+31:i])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VCVTPS2UDQ" form="xmm {z}, xmm" xed="VCVTPS2UDQ_XMMu32_MASKmskw_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cvttpd_epi32">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <description>Convert packed double-precision (64-bit) floating-point elements in "a" to packed 32-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := 32*j
+ l := 64*j
+ IF k[j]
+ dst[i+31:i] := Convert_FP64_To_Int32_Truncate(a[l+63:l])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VCVTTPD2DQ" form="xmm {k}, ymm" xed="VCVTTPD2DQ_XMMi32_MASKmskw_YMMf64_AVX512_VL256"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_cvttpd_epi32">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <description>Convert packed double-precision (64-bit) floating-point elements in "a" to packed 32-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := 32*j
+ l := 64*j
+ IF k[j]
+ dst[i+31:i] := Convert_FP64_To_Int32_Truncate(a[l+63:l])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VCVTTPD2DQ" form="xmm {z}, ymm" xed="VCVTTPD2DQ_XMMi32_MASKmskw_YMMf64_AVX512_VL256"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cvttpd_epi32">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Convert packed double-precision (64-bit) floating-point elements in "a" to packed 32-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := 32*j
+ l := 64*j
+ IF k[j]
+ dst[i+31:i] := Convert_FP64_To_Int32_Truncate(a[l+63:l])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:64] := 0
+ </operation>
+ <instruction name="VCVTTPD2DQ" form="xmm {k}, xmm" xed="VCVTTPD2DQ_XMMi32_MASKmskw_XMMf64_AVX512_VL128"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_cvttpd_epi32">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Convert packed double-precision (64-bit) floating-point elements in "a" to packed 32-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := 32*j
+ l := 64*j
+ IF k[j]
+ dst[i+31:i] := Convert_FP64_To_Int32_Truncate(a[l+63:l])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:64] := 0
+ </operation>
+ <instruction name="VCVTTPD2DQ" form="xmm {z}, xmm" xed="VCVTTPD2DQ_XMMi32_MASKmskw_XMMf64_AVX512_VL128"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_cvttpd_epu32">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <description>Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 32-bit integers with truncation, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := 32*j
+ k := 64*j
+ dst[i+31:i] := Convert_FP64_To_UInt32_Truncate(a[k+63:k])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VCVTTPD2UDQ" form="xmm, ymm" xed="VCVTTPD2UDQ_XMMu32_MASKmskw_YMMf64_AVX512_VL256"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cvttpd_epu32">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <description>Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 32-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := 32*j
+ l := 64*j
+ IF k[j]
+ dst[i+31:i] := Convert_FP64_To_UInt32_Truncate(a[l+63:l])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VCVTTPD2UDQ" form="xmm {k}, ymm" xed="VCVTTPD2UDQ_XMMu32_MASKmskw_YMMf64_AVX512_VL256"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_cvttpd_epu32">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <description>Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 32-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := 32*j
+ l := 64*j
+ IF k[j]
+ dst[i+31:i] := Convert_FP64_To_UInt32_Truncate(a[l+63:l])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VCVTTPD2UDQ" form="xmm {z}, ymm" xed="VCVTTPD2UDQ_XMMu32_MASKmskw_YMMf64_AVX512_VL256"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cvttpd_epu32">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 32-bit integers with truncation, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := 32*j
+ k := 64*j
+ dst[i+31:i] := Convert_FP64_To_UInt32_Truncate(a[k+63:k])
+ENDFOR
+dst[MAX:64] := 0
+ </operation>
+ <instruction name="VCVTTPD2UDQ" form="xmm, xmm" xed="VCVTTPD2UDQ_XMMu32_MASKmskw_XMMf64_AVX512_VL128"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cvttpd_epu32">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 32-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := 32*j
+ l := 64*j
+ IF k[j]
+ dst[i+31:i] := Convert_FP64_To_UInt32_Truncate(a[l+63:l])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:64] := 0
+ </operation>
+ <instruction name="VCVTTPD2UDQ" form="xmm {k}, xmm" xed="VCVTTPD2UDQ_XMMu32_MASKmskw_XMMf64_AVX512_VL128"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_cvttpd_epu32">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 32-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := 32*j
+ l := 64*j
+ IF k[j]
+ dst[i+31:i] := Convert_FP64_To_UInt32_Truncate(a[l+63:l])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:64] := 0
+ </operation>
+ <instruction name="VCVTTPD2UDQ" form="xmm {z}, xmm" xed="VCVTTPD2UDQ_XMMu32_MASKmskw_XMMf64_AVX512_VL128"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cvttps_epi32">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed 32-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := 32*j
+ IF k[j]
+ dst[i+31:i] := Convert_FP32_To_Int32_Truncate(a[i+31:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VCVTTPS2DQ" form="ymm {k}, ymm" xed="VCVTTPS2DQ_YMMi32_MASKmskw_YMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_cvttps_epi32">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed 32-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := 32*j
+ IF k[j]
+ dst[i+31:i] := Convert_FP32_To_Int32_Truncate(a[i+31:i])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VCVTTPS2DQ" form="ymm {z}, ymm" xed="VCVTTPS2DQ_YMMi32_MASKmskw_YMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cvttps_epi32">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed 32-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := 32*j
+ IF k[j]
+ dst[i+31:i] := Convert_FP32_To_Int32_Truncate(a[i+31:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VCVTTPS2DQ" form="xmm {k}, xmm" xed="VCVTTPS2DQ_XMMi32_MASKmskw_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_cvttps_epi32">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed 32-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := 32*j
+ IF k[j]
+ dst[i+31:i] := Convert_FP32_To_Int32_Truncate(a[i+31:i])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VCVTTPS2DQ" form="xmm {z}, xmm" xed="VCVTTPS2DQ_XMMi32_MASKmskw_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_cvttps_epu32">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 32-bit integers with truncation, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := 32*j
+ dst[i+31:i] := Convert_FP32_To_UInt32_Truncate(a[i+31:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VCVTTPS2UDQ" form="ymm, ymm" xed="VCVTTPS2UDQ_YMMu32_MASKmskw_YMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cvttps_epu32">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 32-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := 32*j
+ IF k[j]
+ dst[i+31:i] := Convert_FP32_To_UInt32_Truncate(a[i+31:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VCVTTPS2UDQ" form="ymm {k}, ymm" xed="VCVTTPS2UDQ_YMMu32_MASKmskw_YMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_cvttps_epu32">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 32-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := 32*j
+ IF k[j]
+ dst[i+31:i] := Convert_FP32_To_UInt32_Truncate(a[i+31:i])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VCVTTPS2UDQ" form="ymm {z}, ymm" xed="VCVTTPS2UDQ_YMMu32_MASKmskw_YMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cvttps_epu32">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 32-bit integers with truncation, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := 32*j
+ dst[i+31:i] := Convert_FP32_To_UInt32_Truncate(a[i+31:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VCVTTPS2UDQ" form="xmm, xmm" xed="VCVTTPS2UDQ_XMMu32_MASKmskw_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cvttps_epu32">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 32-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := 32*j
+ IF k[j]
+ dst[i+31:i] := Convert_FP32_To_UInt32_Truncate(a[i+31:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VCVTTPS2UDQ" form="xmm {k}, xmm" xed="VCVTTPS2UDQ_XMMu32_MASKmskw_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_cvttps_epu32">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 32-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := 32*j
+ IF k[j]
+ dst[i+31:i] := Convert_FP32_To_UInt32_Truncate(a[i+31:i])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VCVTTPS2UDQ" form="xmm {z}, xmm" xed="VCVTTPS2UDQ_XMMu32_MASKmskw_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_cvtepu32_pd">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <description>Convert packed unsigned 32-bit integers in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ l := j*32
+ dst[i+63:i] := Convert_Int64_To_FP64(ZeroExtend64(a[l+31:l]))
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VCVTUDQ2PD" form="ymm, xmm" xed="VCVTUDQ2PD_YMMf64_MASKmskw_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cvtepu32_pd">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <description>Convert packed unsigned 32-bit integers in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ l := j*32
+ IF k[j]
+ dst[i+63:i] := Convert_Int64_To_FP64(ZeroExtend64(a[l+31:l]))
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VCVTUDQ2PD" form="ymm {k}, xmm" xed="VCVTUDQ2PD_YMMf64_MASKmskw_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_cvtepu32_pd">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <description>Convert packed unsigned 32-bit integers in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ l := j*32
+ IF k[j]
+ dst[i+63:i] := Convert_Int64_To_FP64(a[l+31:l])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VCVTUDQ2PD" form="ymm {z}, xmm" xed="VCVTUDQ2PD_YMMf64_MASKmskw_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cvtepu32_pd">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <description>Convert packed unsigned 32-bit integers in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ l := j*32
+ dst[i+63:i] := Convert_Int64_To_FP64(a[l+31:l])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VCVTUDQ2PD" form="xmm, xmm" xed="VCVTUDQ2PD_XMMf64_MASKmskw_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cvtepu32_pd">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <description>Convert packed unsigned 32-bit integers in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ l := j*32
+ IF k[j]
+ dst[i+63:i] := Convert_Int64_To_FP64(a[l+31:l])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VCVTUDQ2PD" form="xmm {k}, xmm" xed="VCVTUDQ2PD_XMMf64_MASKmskw_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_cvtepu32_pd">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <description>Convert packed unsigned 32-bit integers in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ l := j*32
+ IF k[j]
+ dst[i+63:i] := Convert_Int64_To_FP64(a[l+31:l])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VCVTUDQ2PD" form="xmm {z}, xmm" xed="VCVTUDQ2PD_XMMf64_MASKmskw_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_div_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <parameter type="__m256d" varname="b" etype="FP64"/>
+ <description>Divide packed double-precision (64-bit) floating-point elements in "a" by packed elements in "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := 64*j
+ IF k[j]
+ dst[i+63:i] := a[i+63:i] / b[i+63:i]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VDIVPD" form="ymm {k}, ymm, ymm" xed="VDIVPD_YMMf64_MASKmskw_YMMf64_YMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_div_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <parameter type="__m256d" varname="b" etype="FP64"/>
+ <description>Divide packed double-precision (64-bit) floating-point elements in "a" by packed elements in "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := 64*j
+ IF k[j]
+ dst[i+63:i] := a[i+63:i] / b[i+63:i]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VDIVPD" form="ymm {z}, ymm, ymm" xed="VDIVPD_YMMf64_MASKmskw_YMMf64_YMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_div_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Divide packed double-precision (64-bit) floating-point elements in "a" by packed elements in "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := 64*j
+ IF k[j]
+ dst[i+63:i] := a[i+63:i] / b[i+63:i]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VDIVPD" form="xmm {k}, xmm, xmm" xed="VDIVPD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_div_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Divide packed double-precision (64-bit) floating-point elements in "a" by packed elements in "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := 64*j
+ IF k[j]
+ dst[i+63:i] := a[i+63:i] / b[i+63:i]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VDIVPD" form="xmm {z}, xmm, xmm" xed="VDIVPD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_div_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="__m256" varname="b" etype="FP32"/>
+ <description>Divide packed single-precision (32-bit) floating-point elements in "a" by packed elements in "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := 32*j
+ IF k[j]
+ dst[i+31:i] := a[i+31:i] / b[i+31:i]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VDIVPS" form="ymm {k}, ymm, ymm" xed="VDIVPS_YMMf32_MASKmskw_YMMf32_YMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_div_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="__m256" varname="b" etype="FP32"/>
+ <description>Divide packed single-precision (32-bit) floating-point elements in "a" by packed elements in "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := 32*j
+ IF k[j]
+ dst[i+31:i] := a[i+31:i] / b[i+31:i]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VDIVPS" form="ymm {z}, ymm, ymm" xed="VDIVPS_YMMf32_MASKmskw_YMMf32_YMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_div_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Divide packed single-precision (32-bit) floating-point elements in "a" by packed elements in "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := 32*j
+ IF k[j]
+ dst[i+31:i] := a[i+31:i] / b[i+31:i]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VDIVPS" form="xmm {k}, xmm, xmm" xed="VDIVPS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_div_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Divide packed single-precision (32-bit) floating-point elements in "a" by packed elements in "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := 32*j
+ IF k[j]
+ dst[i+31:i] := a[i+31:i] / b[i+31:i]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VDIVPS" form="xmm {z}, xmm, xmm" xed="VDIVPS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_expand_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <description>Load contiguous active double-precision (64-bit) floating-point elements from "a" (those with their respective bit set in mask "k"), and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+m := 0
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := a[m+63:m]
+ m := m + 64
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VEXPANDPD" form="ymm {k}, ymm" xed="VEXPANDPD_YMMf64_MASKmskw_YMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_expandloadu_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Load</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="void const*" varname="mem_addr" etype="FP64" memwidth="256"/>
+ <description>Load contiguous active double-precision (64-bit) floating-point elements from unaligned memory at "mem_addr" (those with their respective bit set in mask "k"), and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+m := 0
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := MEM[mem_addr+m+63:mem_addr+m]
+ m := m + 64
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VEXPANDPD" form="ymm {k}, m256" xed="VEXPANDPD_YMMf64_MASKmskw_MEMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_expand_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <description>Load contiguous active double-precision (64-bit) floating-point elements from "a" (those with their respective bit set in mask "k"), and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+m := 0
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := a[m+63:m]
+ m := m + 64
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VEXPANDPD" form="ymm {z}, ymm" xed="VEXPANDPD_YMMf64_MASKmskw_YMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_expandloadu_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Load</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="void const*" varname="mem_addr" etype="FP64" memwidth="256"/>
+ <description>Load contiguous active double-precision (64-bit) floating-point elements from unaligned memory at "mem_addr" (those with their respective bit set in mask "k"), and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+m := 0
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := MEM[mem_addr+m+63:mem_addr+m]
+ m := m + 64
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VEXPANDPD" form="ymm {z}, m256" xed="VEXPANDPD_YMMf64_MASKmskw_MEMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_expand_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Load contiguous active double-precision (64-bit) floating-point elements from "a" (those with their respective bit set in mask "k"), and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+m := 0
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := a[m+63:m]
+ m := m + 64
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VEXPANDPD" form="xmm {k}, xmm" xed="VEXPANDPD_XMMf64_MASKmskw_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_expandloadu_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Load</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="void const*" varname="mem_addr" etype="FP64" memwidth="128"/>
+ <description>Load contiguous active double-precision (64-bit) floating-point elements from unaligned memory at "mem_addr" (those with their respective bit set in mask "k"), and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+m := 0
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := MEM[mem_addr+m+63:mem_addr+m]
+ m := m + 64
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VEXPANDPD" form="xmm {k}, m128" xed="VEXPANDPD_XMMf64_MASKmskw_MEMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_expand_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Load contiguous active double-precision (64-bit) floating-point elements from "a" (those with their respective bit set in mask "k"), and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+m := 0
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := a[m+63:m]
+ m := m + 64
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VEXPANDPD" form="xmm {z}, xmm" xed="VEXPANDPD_XMMf64_MASKmskw_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_expandloadu_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Load</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="void const*" varname="mem_addr" etype="FP64" memwidth="128"/>
+ <description>Load contiguous active double-precision (64-bit) floating-point elements from unaligned memory at "mem_addr" (those with their respective bit set in mask "k"), and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+m := 0
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := MEM[mem_addr+m+63:mem_addr+m]
+ m := m + 64
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VEXPANDPD" form="xmm {z}, m128" xed="VEXPANDPD_XMMf64_MASKmskw_MEMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_expand_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <description>Load contiguous active single-precision (32-bit) floating-point elements from "a" (those with their respective bit set in mask "k"), and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+m := 0
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := a[m+31:m]
+ m := m + 32
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VEXPANDPS" form="ymm {k}, ymm" xed="VEXPANDPS_YMMf32_MASKmskw_YMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_expandloadu_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Load</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="void const*" varname="mem_addr" etype="FP32" memwidth="256"/>
+ <description>Load contiguous active single-precision (32-bit) floating-point elements from unaligned memory at "mem_addr" (those with their respective bit set in mask "k"), and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+m := 0
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := MEM[mem_addr+m+31:mem_addr+m]
+ m := m + 32
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VEXPANDPS" form="ymm {k}, m256" xed="VEXPANDPS_YMMf32_MASKmskw_MEMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_expand_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <description>Load contiguous active single-precision (32-bit) floating-point elements from "a" (those with their respective bit set in mask "k"), and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+m := 0
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := a[m+31:m]
+ m := m + 32
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VEXPANDPS" form="ymm {z}, ymm" xed="VEXPANDPS_YMMf32_MASKmskw_YMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_expandloadu_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Load</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="void const*" varname="mem_addr" etype="FP32" memwidth="256"/>
+ <description>Load contiguous active single-precision (32-bit) floating-point elements from unaligned memory at "mem_addr" (those with their respective bit set in mask "k"), and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+m := 0
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := MEM[mem_addr+m+31:mem_addr+m]
+ m := m + 32
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VEXPANDPS" form="ymm {z}, m256" xed="VEXPANDPS_YMMf32_MASKmskw_MEMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_expand_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Load contiguous active single-precision (32-bit) floating-point elements from "a" (those with their respective bit set in mask "k"), and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+m := 0
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := a[m+31:m]
+ m := m + 32
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VEXPANDPS" form="xmm {k}, xmm" xed="VEXPANDPS_XMMf32_MASKmskw_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_expandloadu_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Load</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="void const*" varname="mem_addr" etype="FP32" memwidth="128"/>
+ <description>Load contiguous active single-precision (32-bit) floating-point elements from unaligned memory at "mem_addr" (those with their respective bit set in mask "k"), and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+m := 0
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := MEM[mem_addr+m+31:mem_addr+m]
+ m := m + 32
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VEXPANDPS" form="xmm {k}, m128" xed="VEXPANDPS_XMMf32_MASKmskw_MEMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_expand_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Load contiguous active single-precision (32-bit) floating-point elements from "a" (those with their respective bit set in mask "k"), and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+m := 0
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := a[m+31:m]
+ m := m + 32
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VEXPANDPS" form="xmm {z}, xmm" xed="VEXPANDPS_XMMf32_MASKmskw_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_expandloadu_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Load</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="void const*" varname="mem_addr" etype="FP32" memwidth="128"/>
+ <description>Load contiguous active single-precision (32-bit) floating-point elements from unaligned memory at "mem_addr" (those with their respective bit set in mask "k"), and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+m := 0
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := MEM[mem_addr+m+31:mem_addr+m]
+ m := m + 32
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VEXPANDPS" form="xmm {z}, m128" xed="VEXPANDPS_XMMf32_MASKmskw_MEMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_extractf32x4_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="1"/>
+ <description>Extract 128 bits (composed of 4 packed single-precision (32-bit) floating-point elements) from "a", selected with "imm8", and store the result in "dst".</description>
+ <operation>
+CASE imm8[0] OF
+0: dst[127:0] := a[127:0]
+1: dst[127:0] := a[255:128]
+ESAC
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VEXTRACTF32X4" form="xmm, ymm, imm8" xed="VEXTRACTF32X4_XMMf32_MASKmskw_YMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_extractf32x4_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="1"/>
+ <description>Extract 128 bits (composed of 4 packed single-precision (32-bit) floating-point elements) from "a", selected with "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+CASE imm8[0] OF
+0: tmp[127:0] := a[127:0]
+1: tmp[127:0] := a[255:128]
+ESAC
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := tmp[i+31:i]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VEXTRACTF32X4" form="xmm {k}, ymm, imm8" xed="VEXTRACTF32X4_XMMf32_MASKmskw_YMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_extractf32x4_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="1"/>
+ <description>Extract 128 bits (composed of 4 packed single-precision (32-bit) floating-point elements) from "a", selected with "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+CASE imm8[0] OF
+0: tmp[127:0] := a[127:0]
+1: tmp[127:0] := a[255:128]
+ESAC
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := tmp[i+31:i]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VEXTRACTF32X4" form="xmm {z}, ymm, imm8" xed="VEXTRACTF32X4_XMMf32_MASKmskw_YMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_extracti32x4_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="1"/>
+ <description>Extract 128 bits (composed of 4 packed 32-bit integers) from "a", selected with "imm8", and store the result in "dst".</description>
+ <operation>
+CASE imm8[0] OF
+0: dst[127:0] := a[127:0]
+1: dst[127:0] := a[255:128]
+ESAC
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VEXTRACTI32X4" form="xmm, ymm, imm8" xed="VEXTRACTI32X4_XMMu32_MASKmskw_YMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_extracti32x4_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="1"/>
+ <description>Extract 128 bits (composed of 4 packed 32-bit integers) from "a", selected with "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+CASE imm8[0] OF
+0: tmp[127:0] := a[127:0]
+1: tmp[127:0] := a[255:128]
+ESAC
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := tmp[i+31:i]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VEXTRACTI32X4" form="xmm {k}, ymm, imm8" xed="VEXTRACTI32X4_XMMu32_MASKmskw_YMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_extracti32x4_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="1"/>
+ <description>Extract 128 bits (composed of 4 packed 32-bit integers) from "a", selected with "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+CASE imm8[0] OF
+0: tmp[127:0] := a[127:0]
+1: tmp[127:0] := a[255:128]
+ESAC
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := tmp[i+31:i]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VEXTRACTI32X4" form="xmm {z}, ymm, imm8" xed="VEXTRACTI32X4_XMMu32_MASKmskw_YMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_fixupimm_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <parameter type="__m256d" varname="b" etype="FP64"/>
+ <parameter type="__m256i" varname="c" etype="UI64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Fix up packed double-precision (64-bit) floating-point elements in "a" and "b" using packed 64-bit integers in "c", and store the results in "dst". "imm8" is used to set the required flags reporting.</description>
+ <operation>enum TOKEN_TYPE {
+ QNAN_TOKEN := 0, \
+ SNAN_TOKEN := 1, \
+ ZERO_VALUE_TOKEN := 2, \
+ ONE_VALUE_TOKEN := 3, \
+ NEG_INF_TOKEN := 4, \
+ POS_INF_TOKEN := 5, \
+ NEG_VALUE_TOKEN := 6, \
+ POS_VALUE_TOKEN := 7
+}
+DEFINE FIXUPIMMPD(src1[63:0], src2[63:0], src3[63:0], imm8[7:0]) {
+ tsrc[63:0] := ((src2[62:52] == 0) AND (MXCSR.DAZ == 1)) ? 0.0 : src2[63:0]
+ CASE(tsrc[63:0]) OF
+ QNAN_TOKEN: j := 0
+ SNAN_TOKEN: j := 1
+ ZERO_VALUE_TOKEN: j := 2
+ ONE_VALUE_TOKEN: j := 3
+ NEG_INF_TOKEN: j := 4
+ POS_INF_TOKEN: j := 5
+ NEG_VALUE_TOKEN: j := 6
+ POS_VALUE_TOKEN: j := 7
+ ESAC
+
+ token_response[3:0] := src3[3+4*j:4*j]
+
+ CASE(token_response[3:0]) OF
+ 0 : dest[63:0] := src1[63:0]
+ 1 : dest[63:0] := tsrc[63:0]
+ 2 : dest[63:0] := QNaN(tsrc[63:0])
+ 3 : dest[63:0] := QNAN_Indefinite
+ 4 : dest[63:0] := -INF
+ 5 : dest[63:0] := +INF
+ 6 : dest[63:0] := tsrc.sign? -INF : +INF
+ 7 : dest[63:0] := -0
+ 8 : dest[63:0] := +0
+ 9 : dest[63:0] := -1
+ 10: dest[63:0] := +1
+ 11: dest[63:0] := 1/2
+ 12: dest[63:0] := 90.0
+ 13: dest[63:0] := PI/2
+ 14: dest[63:0] := MAX_FLOAT
+ 15: dest[63:0] := -MAX_FLOAT
+ ESAC
+
+ CASE(tsrc[31:0]) OF
+ ZERO_VALUE_TOKEN:
+ IF (imm8[0]) #ZE; FI
+ ZERO_VALUE_TOKEN:
+ IF (imm8[1]) #IE; FI
+ ONE_VALUE_TOKEN:
+ IF (imm8[2]) #ZE; FI
+ ONE_VALUE_TOKEN:
+ IF (imm8[3]) #IE; FI
+ SNAN_TOKEN:
+ IF (imm8[4]) #IE; FI
+ NEG_INF_TOKEN:
+ IF (imm8[5]) #IE; FI
+ NEG_VALUE_TOKEN:
+ IF (imm8[6]) #IE; FI
+ POS_INF_TOKEN:
+ IF (imm8[7]) #IE; FI
+ ESAC
+ RETURN dest[63:0]
+}
+FOR j := 0 to 3
+ i := j*64
+ dst[i+63:i] := FIXUPIMMPD(a[i+63:i], b[i+63:i], c[i+63:i], imm8[7:0])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VFIXUPIMMPD" form="ymm, ymm, ymm, imm8" xed="VFIXUPIMMPD_YMMf64_MASKmskw_YMMf64_YMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_fixupimm_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256d" varname="b" etype="FP64"/>
+ <parameter type="__m256i" varname="c" etype="UI64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Fix up packed double-precision (64-bit) floating-point elements in "a" and "b" using packed 64-bit integers in "c", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). "imm8" is used to set the required flags reporting.</description>
+ <operation>enum TOKEN_TYPE {
+ QNAN_TOKEN := 0, \
+ SNAN_TOKEN := 1, \
+ ZERO_VALUE_TOKEN := 2, \
+ ONE_VALUE_TOKEN := 3, \
+ NEG_INF_TOKEN := 4, \
+ POS_INF_TOKEN := 5, \
+ NEG_VALUE_TOKEN := 6, \
+ POS_VALUE_TOKEN := 7
+}
+DEFINE FIXUPIMMPD(src1[63:0], src2[63:0], src3[63:0], imm8[7:0]) {
+ tsrc[63:0] := ((src2[62:52] == 0) AND (MXCSR.DAZ == 1)) ? 0.0 : src2[63:0]
+ CASE(tsrc[63:0]) OF
+ QNAN_TOKEN:j := 0
+ SNAN_TOKEN:j := 1
+ ZERO_VALUE_TOKEN: j := 2
+ ONE_VALUE_TOKEN: j := 3
+ NEG_INF_TOKEN: j := 4
+ POS_INF_TOKEN: j := 5
+ NEG_VALUE_TOKEN: j := 6
+ POS_VALUE_TOKEN: j := 7
+ ESAC
+
+ token_response[3:0] := src3[3+4*j:4*j]
+
+ CASE(token_response[3:0]) OF
+ 0 : dest[63:0] := src1[63:0]
+ 1 : dest[63:0] := tsrc[63:0]
+ 2 : dest[63:0] := QNaN(tsrc[63:0])
+ 3 : dest[63:0] := QNAN_Indefinite
+ 4 : dest[63:0] := -INF
+ 5 : dest[63:0] := +INF
+ 6 : dest[63:0] := tsrc.sign? -INF : +INF
+ 7 : dest[63:0] := -0
+ 8 : dest[63:0] := +0
+ 9 : dest[63:0] := -1
+ 10: dest[63:0] := +1
+ 11: dest[63:0] := 1/2
+ 12: dest[63:0] := 90.0
+ 13: dest[63:0] := PI/2
+ 14: dest[63:0] := MAX_FLOAT
+ 15: dest[63:0] := -MAX_FLOAT
+ ESAC
+
+ CASE(tsrc[31:0]) OF
+ ZERO_VALUE_TOKEN:
+ IF (imm8[0]) #ZE; FI
+ ZERO_VALUE_TOKEN:
+ IF (imm8[1]) #IE; FI
+ ONE_VALUE_TOKEN:
+ IF (imm8[2]) #ZE; FI
+ ONE_VALUE_TOKEN:
+ IF (imm8[3]) #IE; FI
+ SNAN_TOKEN:
+ IF (imm8[4]) #IE; FI
+ NEG_INF_TOKEN:
+ IF (imm8[5]) #IE; FI
+ NEG_VALUE_TOKEN:
+ IF (imm8[6]) #IE; FI
+ POS_INF_TOKEN:
+ IF (imm8[7]) #IE; FI
+ ESAC
+ RETURN dest[63:0]
+}
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := FIXUPIMMPD(a[i+63:i], b[i+63:i], c[i+63:i], imm8[7:0])
+ ELSE
+ dst[i+63:i] := a[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VFIXUPIMMPD" form="ymm {k}, ymm, ymm, imm8" xed="VFIXUPIMMPD_YMMf64_MASKmskw_YMMf64_YMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_fixupimm_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <parameter type="__m256d" varname="b" etype="FP64"/>
+ <parameter type="__m256i" varname="c" etype="UI64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Fix up packed double-precision (64-bit) floating-point elements in "a" and "b" using packed 64-bit integers in "c", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). "imm8" is used to set the required flags reporting.</description>
+ <operation>enum TOKEN_TYPE {
+ QNAN_TOKEN := 0, \
+ SNAN_TOKEN := 1, \
+ ZERO_VALUE_TOKEN := 2, \
+ ONE_VALUE_TOKEN := 3, \
+ NEG_INF_TOKEN := 4, \
+ POS_INF_TOKEN := 5, \
+ NEG_VALUE_TOKEN := 6, \
+ POS_VALUE_TOKEN := 7
+}
+DEFINE FIXUPIMMPD(src1[63:0], src2[63:0], src3[63:0], imm8[7:0]) {
+ tsrc[63:0] := ((src2[62:52] == 0) AND (MXCSR.DAZ == 1)) ? 0.0 : src2[63:0]
+ CASE(tsrc[63:0]) OF
+ QNAN_TOKEN:j := 0
+ SNAN_TOKEN:j := 1
+ ZERO_VALUE_TOKEN: j := 2
+ ONE_VALUE_TOKEN: j := 3
+ NEG_INF_TOKEN: j := 4
+ POS_INF_TOKEN: j := 5
+ NEG_VALUE_TOKEN: j := 6
+ POS_VALUE_TOKEN: j := 7
+ ESAC
+
+ token_response[3:0] := src3[3+4*j:4*j]
+
+ CASE(token_response[3:0]) OF
+ 0 : dest[63:0] := src1[63:0]
+ 1 : dest[63:0] := tsrc[63:0]
+ 2 : dest[63:0] := QNaN(tsrc[63:0])
+ 3 : dest[63:0] := QNAN_Indefinite
+ 4 : dest[63:0] := -INF
+ 5 : dest[63:0] := +INF
+ 6 : dest[63:0] := tsrc.sign? -INF : +INF
+ 7 : dest[63:0] := -0
+ 8 : dest[63:0] := +0
+ 9 : dest[63:0] := -1
+ 10: dest[63:0] := +1
+ 11: dest[63:0] := 1/2
+ 12: dest[63:0] := 90.0
+ 13: dest[63:0] := PI/2
+ 14: dest[63:0] := MAX_FLOAT
+ 15: dest[63:0] := -MAX_FLOAT
+ ESAC
+
+ CASE(tsrc[31:0]) OF
+ ZERO_VALUE_TOKEN:
+ IF (imm8[0]) #ZE; FI
+ ZERO_VALUE_TOKEN:
+ IF (imm8[1]) #IE; FI
+ ONE_VALUE_TOKEN:
+ IF (imm8[2]) #ZE; FI
+ ONE_VALUE_TOKEN:
+ IF (imm8[3]) #IE; FI
+ SNAN_TOKEN:
+ IF (imm8[4]) #IE; FI
+ NEG_INF_TOKEN:
+ IF (imm8[5]) #IE; FI
+ NEG_VALUE_TOKEN:
+ IF (imm8[6]) #IE; FI
+ POS_INF_TOKEN:
+ IF (imm8[7]) #IE; FI
+ ESAC
+ RETURN dest[63:0]
+}
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := FIXUPIMMPD(a[i+63:i], b[i+63:i], c[i+63:i], imm8[7:0])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VFIXUPIMMPD" form="ymm {z}, ymm, ymm, imm8" xed="VFIXUPIMMPD_YMMf64_MASKmskw_YMMf64_YMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_fixupimm_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="__m128i" varname="c" etype="UI64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Fix up packed double-precision (64-bit) floating-point elements in "a" and "b" using packed 64-bit integers in "c", and store the results in "dst". "imm8" is used to set the required flags reporting.</description>
+ <operation>enum TOKEN_TYPE {
+ QNAN_TOKEN := 0, \
+ SNAN_TOKEN := 1, \
+ ZERO_VALUE_TOKEN := 2, \
+ ONE_VALUE_TOKEN := 3, \
+ NEG_INF_TOKEN := 4, \
+ POS_INF_TOKEN := 5, \
+ NEG_VALUE_TOKEN := 6, \
+ POS_VALUE_TOKEN := 7
+}
+DEFINE FIXUPIMMPD(src1[63:0], src2[63:0], src3[63:0], imm8[7:0]) {
+ tsrc[63:0] := ((src2[62:52] == 0) AND (MXCSR.DAZ == 1)) ? 0.0 : src2[63:0]
+ CASE(tsrc[63:0]) OF
+ QNAN_TOKEN:j := 0
+ SNAN_TOKEN:j := 1
+ ZERO_VALUE_TOKEN: j := 2
+ ONE_VALUE_TOKEN: j := 3
+ NEG_INF_TOKEN: j := 4
+ POS_INF_TOKEN: j := 5
+ NEG_VALUE_TOKEN: j := 6
+ POS_VALUE_TOKEN: j := 7
+ ESAC
+
+ token_response[3:0] := src3[3+4*j:4*j]
+
+ CASE(token_response[3:0]) OF
+ 0 : dest[63:0] := src1[63:0]
+ 1 : dest[63:0] := tsrc[63:0]
+ 2 : dest[63:0] := QNaN(tsrc[63:0])
+ 3 : dest[63:0] := QNAN_Indefinite
+ 4 : dest[63:0] := -INF
+ 5 : dest[63:0] := +INF
+ 6 : dest[63:0] := tsrc.sign? -INF : +INF
+ 7 : dest[63:0] := -0
+ 8 : dest[63:0] := +0
+ 9 : dest[63:0] := -1
+ 10: dest[63:0] := +1
+ 11: dest[63:0] := 1/2
+ 12: dest[63:0] := 90.0
+ 13: dest[63:0] := PI/2
+ 14: dest[63:0] := MAX_FLOAT
+ 15: dest[63:0] := -MAX_FLOAT
+ ESAC
+
+ CASE(tsrc[31:0]) OF
+ ZERO_VALUE_TOKEN:
+ IF (imm8[0]) #ZE; FI
+ ZERO_VALUE_TOKEN:
+ IF (imm8[1]) #IE; FI
+ ONE_VALUE_TOKEN:
+ IF (imm8[2]) #ZE; FI
+ ONE_VALUE_TOKEN:
+ IF (imm8[3]) #IE; FI
+ SNAN_TOKEN:
+ IF (imm8[4]) #IE; FI
+ NEG_INF_TOKEN:
+ IF (imm8[5]) #IE; FI
+ NEG_VALUE_TOKEN:
+ IF (imm8[6]) #IE; FI
+ POS_INF_TOKEN:
+ IF (imm8[7]) #IE; FI
+ ESAC
+ RETURN dest[63:0]
+}
+FOR j := 0 to 1
+ i := j*64
+ dst[i+63:i] := FIXUPIMMPD(a[i+63:i], b[i+63:i], c[i+63:i], imm8[7:0])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VFIXUPIMMPD" form="xmm, xmm, xmm, imm8" xed="VFIXUPIMMPD_XMMf64_MASKmskw_XMMf64_XMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_fixupimm_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="__m128i" varname="c" etype="UI64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Fix up packed double-precision (64-bit) floating-point elements in "a" and "b" using packed 64-bit integers in "c", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). "imm8" is used to set the required flags reporting.</description>
+ <operation>enum TOKEN_TYPE {
+ QNAN_TOKEN := 0, \
+ SNAN_TOKEN := 1, \
+ ZERO_VALUE_TOKEN := 2, \
+ ONE_VALUE_TOKEN := 3, \
+ NEG_INF_TOKEN := 4, \
+ POS_INF_TOKEN := 5, \
+ NEG_VALUE_TOKEN := 6, \
+ POS_VALUE_TOKEN := 7
+}
+DEFINE FIXUPIMMPD(src1[63:0], src2[63:0], src3[63:0], imm8[7:0]) {
+ tsrc[63:0] := ((src2[62:52] == 0) AND (MXCSR.DAZ == 1)) ? 0.0 : src2[63:0]
+ CASE(tsrc[63:0]) OF
+ QNAN_TOKEN:j := 0
+ SNAN_TOKEN:j := 1
+ ZERO_VALUE_TOKEN: j := 2
+ ONE_VALUE_TOKEN: j := 3
+ NEG_INF_TOKEN: j := 4
+ POS_INF_TOKEN: j := 5
+ NEG_VALUE_TOKEN: j := 6
+ POS_VALUE_TOKEN: j := 7
+ ESAC
+
+ token_response[3:0] := src3[3+4*j:4*j]
+
+ CASE(token_response[3:0]) OF
+ 0 : dest[63:0] := src1[63:0]
+ 1 : dest[63:0] := tsrc[63:0]
+ 2 : dest[63:0] := QNaN(tsrc[63:0])
+ 3 : dest[63:0] := QNAN_Indefinite
+ 4 : dest[63:0] := -INF
+ 5 : dest[63:0] := +INF
+ 6 : dest[63:0] := tsrc.sign? -INF : +INF
+ 7 : dest[63:0] := -0
+ 8 : dest[63:0] := +0
+ 9 : dest[63:0] := -1
+ 10: dest[63:0] := +1
+ 11: dest[63:0] := 1/2
+ 12: dest[63:0] := 90.0
+ 13: dest[63:0] := PI/2
+ 14: dest[63:0] := MAX_FLOAT
+ 15: dest[63:0] := -MAX_FLOAT
+ ESAC
+
+ CASE(tsrc[31:0]) OF
+ ZERO_VALUE_TOKEN:
+ IF (imm8[0]) #ZE; FI
+ ZERO_VALUE_TOKEN:
+ IF (imm8[1]) #IE; FI
+ ONE_VALUE_TOKEN:
+ IF (imm8[2]) #ZE; FI
+ ONE_VALUE_TOKEN:
+ IF (imm8[3]) #IE; FI
+ SNAN_TOKEN:
+ IF (imm8[4]) #IE; FI
+ NEG_INF_TOKEN:
+ IF (imm8[5]) #IE; FI
+ NEG_VALUE_TOKEN:
+ IF (imm8[6]) #IE; FI
+ POS_INF_TOKEN:
+ IF (imm8[7]) #IE; FI
+ ESAC
+ RETURN dest[63:0]
+}
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := FIXUPIMMPD(a[i+63:i], b[i+63:i], c[i+63:i], imm8[7:0])
+ ELSE
+ dst[i+63:i] := a[i+63:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VFIXUPIMMPD" form="xmm {k}, xmm, xmm, imm8" xed="VFIXUPIMMPD_XMMf64_MASKmskw_XMMf64_XMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_fixupimm_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="__m128i" varname="c" etype="UI64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Fix up packed double-precision (64-bit) floating-point elements in "a" and "b" using packed 64-bit integers in "c", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). "imm8" is used to set the required flags reporting.</description>
+ <operation>enum TOKEN_TYPE {
+ QNAN_TOKEN := 0, \
+ SNAN_TOKEN := 1, \
+ ZERO_VALUE_TOKEN := 2, \
+ ONE_VALUE_TOKEN := 3, \
+ NEG_INF_TOKEN := 4, \
+ POS_INF_TOKEN := 5, \
+ NEG_VALUE_TOKEN := 6, \
+ POS_VALUE_TOKEN := 7
+}
+DEFINE FIXUPIMMPD(src1[63:0], src2[63:0], src3[63:0], imm8[7:0]) {
+ tsrc[63:0] := ((src2[62:52] == 0) AND (MXCSR.DAZ == 1)) ? 0.0 : src2[63:0]
+ CASE(tsrc[63:0]) OF
+ QNAN_TOKEN:j := 0
+ SNAN_TOKEN:j := 1
+ ZERO_VALUE_TOKEN: j := 2
+ ONE_VALUE_TOKEN: j := 3
+ NEG_INF_TOKEN: j := 4
+ POS_INF_TOKEN: j := 5
+ NEG_VALUE_TOKEN: j := 6
+ POS_VALUE_TOKEN: j := 7
+ ESAC
+
+ token_response[3:0] := src3[3+4*j:4*j]
+
+ CASE(token_response[3:0]) OF
+ 0 : dest[63:0] := src1[63:0]
+ 1 : dest[63:0] := tsrc[63:0]
+ 2 : dest[63:0] := QNaN(tsrc[63:0])
+ 3 : dest[63:0] := QNAN_Indefinite
+ 4 : dest[63:0] := -INF
+ 5 : dest[63:0] := +INF
+ 6 : dest[63:0] := tsrc.sign? -INF : +INF
+ 7 : dest[63:0] := -0
+ 8 : dest[63:0] := +0
+ 9 : dest[63:0] := -1
+ 10: dest[63:0] := +1
+ 11: dest[63:0] := 1/2
+ 12: dest[63:0] := 90.0
+ 13: dest[63:0] := PI/2
+ 14: dest[63:0] := MAX_FLOAT
+ 15: dest[63:0] := -MAX_FLOAT
+ ESAC
+
+ CASE(tsrc[31:0]) OF
+ ZERO_VALUE_TOKEN:
+ IF (imm8[0]) #ZE; FI
+ ZERO_VALUE_TOKEN:
+ IF (imm8[1]) #IE; FI
+ ONE_VALUE_TOKEN:
+ IF (imm8[2]) #ZE; FI
+ ONE_VALUE_TOKEN:
+ IF (imm8[3]) #IE; FI
+ SNAN_TOKEN:
+ IF (imm8[4]) #IE; FI
+ NEG_INF_TOKEN:
+ IF (imm8[5]) #IE; FI
+ NEG_VALUE_TOKEN:
+ IF (imm8[6]) #IE; FI
+ POS_INF_TOKEN:
+ IF (imm8[7]) #IE; FI
+ ESAC
+ RETURN dest[63:0]
+}
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := FIXUPIMMPD(a[i+63:i], b[i+63:i], c[i+63:i], imm8[7:0])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VFIXUPIMMPD" form="xmm {z}, xmm, xmm, imm8" xed="VFIXUPIMMPD_XMMf64_MASKmskw_XMMf64_XMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_fixupimm_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="__m256" varname="b" etype="FP32"/>
+ <parameter type="__m256i" varname="c" etype="UI32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Fix up packed single-precision (32-bit) floating-point elements in "a" and "b" using packed 32-bit integers in "c", and store the results in "dst". "imm8" is used to set the required flags reporting.</description>
+ <operation>enum TOKEN_TYPE {
+ QNAN_TOKEN := 0, \
+ SNAN_TOKEN := 1, \
+ ZERO_VALUE_TOKEN := 2, \
+ ONE_VALUE_TOKEN := 3, \
+ NEG_INF_TOKEN := 4, \
+ POS_INF_TOKEN := 5, \
+ NEG_VALUE_TOKEN := 6, \
+ POS_VALUE_TOKEN := 7
+}
+DEFINE FIXUPIMMPD(src1[31:0], src2[31:0], src3[31:0], imm8[7:0]) {
+ tsrc[31:0] := ((src2[30:23] == 0) AND (MXCSR.DAZ == 1)) ? 0.0 : src2[31:0]
+ CASE(tsrc[31:0]) OF
+ QNAN_TOKEN:j := 0
+ SNAN_TOKEN:j := 1
+ ZERO_VALUE_TOKEN: j := 2
+ ONE_VALUE_TOKEN: j := 3
+ NEG_INF_TOKEN: j := 4
+ POS_INF_TOKEN: j := 5
+ NEG_VALUE_TOKEN: j := 6
+ POS_VALUE_TOKEN: j := 7
+ ESAC
+
+ token_response[3:0] := src3[3+4*j:4*j]
+
+ CASE(token_response[3:0]) OF
+ 0 : dest[31:0] := src1[31:0]
+ 1 : dest[31:0] := tsrc[31:0]
+ 2 : dest[31:0] := QNaN(tsrc[31:0])
+ 3 : dest[31:0] := QNAN_Indefinite
+ 4 : dest[31:0] := -INF
+ 5 : dest[31:0] := +INF
+ 6 : dest[31:0] := tsrc.sign? -INF : +INF
+ 7 : dest[31:0] := -0
+ 8 : dest[31:0] := +0
+ 9 : dest[31:0] := -1
+ 10: dest[31:0] := +1
+ 11: dest[31:0] := 1/2
+ 12: dest[31:0] := 90.0
+ 13: dest[31:0] := PI/2
+ 14: dest[31:0] := MAX_FLOAT
+ 15: dest[31:0] := -MAX_FLOAT
+ ESAC
+
+ CASE(tsrc[31:0]) OF
+ ZERO_VALUE_TOKEN:
+ IF (imm8[0]) #ZE; FI
+ ZERO_VALUE_TOKEN:
+ IF (imm8[1]) #IE; FI
+ ONE_VALUE_TOKEN:
+ IF (imm8[2]) #ZE; FI
+ ONE_VALUE_TOKEN:
+ IF (imm8[3]) #IE; FI
+ SNAN_TOKEN:
+ IF (imm8[4]) #IE; FI
+ NEG_INF_TOKEN:
+ IF (imm8[5]) #IE; FI
+ NEG_VALUE_TOKEN:
+ IF (imm8[6]) #IE; FI
+ POS_INF_TOKEN:
+ IF (imm8[7]) #IE; FI
+ ESAC
+ RETURN dest[31:0]
+}
+FOR j := 0 to 7
+ i := j*32
+ dst[i+31:i] := FIXUPIMMPD(a[i+31:i], b[i+31:i], c[i+31:i], imm8[7:0])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VFIXUPIMMPS" form="ymm, ymm, ymm, imm8" xed="VFIXUPIMMPS_YMMf32_MASKmskw_YMMf32_YMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_fixupimm_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256" varname="b" etype="FP32"/>
+ <parameter type="__m256i" varname="c" etype="UI32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Fix up packed single-precision (32-bit) floating-point elements in "a" and "b" using packed 32-bit integers in "c", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). "imm8" is used to set the required flags reporting.</description>
+ <operation>enum TOKEN_TYPE {
+ QNAN_TOKEN := 0, \
+ SNAN_TOKEN := 1, \
+ ZERO_VALUE_TOKEN := 2, \
+ ONE_VALUE_TOKEN := 3, \
+ NEG_INF_TOKEN := 4, \
+ POS_INF_TOKEN := 5, \
+ NEG_VALUE_TOKEN := 6, \
+ POS_VALUE_TOKEN := 7
+}
+DEFINE FIXUPIMMPD(src1[31:0], src2[31:0], src3[31:0], imm8[7:0]) {
+ tsrc[31:0] := ((src2[30:23] == 0) AND (MXCSR.DAZ == 1)) ? 0.0 : src2[31:0]
+ CASE(tsrc[31:0]) OF
+ QNAN_TOKEN:j := 0
+ SNAN_TOKEN:j := 1
+ ZERO_VALUE_TOKEN: j := 2
+ ONE_VALUE_TOKEN: j := 3
+ NEG_INF_TOKEN: j := 4
+ POS_INF_TOKEN: j := 5
+ NEG_VALUE_TOKEN: j := 6
+ POS_VALUE_TOKEN: j := 7
+ ESAC
+
+ token_response[3:0] := src3[3+4*j:4*j]
+
+ CASE(token_response[3:0]) OF
+ 0 : dest[31:0] := src1[31:0]
+ 1 : dest[31:0] := tsrc[31:0]
+ 2 : dest[31:0] := QNaN(tsrc[31:0])
+ 3 : dest[31:0] := QNAN_Indefinite
+ 4 : dest[31:0] := -INF
+ 5 : dest[31:0] := +INF
+ 6 : dest[31:0] := tsrc.sign? -INF : +INF
+ 7 : dest[31:0] := -0
+ 8 : dest[31:0] := +0
+ 9 : dest[31:0] := -1
+ 10: dest[31:0] := +1
+ 11: dest[31:0] := 1/2
+ 12: dest[31:0] := 90.0
+ 13: dest[31:0] := PI/2
+ 14: dest[31:0] := MAX_FLOAT
+ 15: dest[31:0] := -MAX_FLOAT
+ ESAC
+
+ CASE(tsrc[31:0]) OF
+ ZERO_VALUE_TOKEN:
+ IF (imm8[0]) #ZE; FI
+ ZERO_VALUE_TOKEN:
+ IF (imm8[1]) #IE; FI
+ ONE_VALUE_TOKEN:
+ IF (imm8[2]) #ZE; FI
+ ONE_VALUE_TOKEN:
+ IF (imm8[3]) #IE; FI
+ SNAN_TOKEN:
+ IF (imm8[4]) #IE; FI
+ NEG_INF_TOKEN:
+ IF (imm8[5]) #IE; FI
+ NEG_VALUE_TOKEN:
+ IF (imm8[6]) #IE; FI
+ POS_INF_TOKEN:
+ IF (imm8[7]) #IE; FI
+ ESAC
+ RETURN dest[31:0]
+}
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := FIXUPIMMPD(a[i+31:i], b[i+31:i], c[i+31:i], imm8[7:0])
+ ELSE
+ dst[i+31:i] := a[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VFIXUPIMMPS" form="ymm {k}, ymm, ymm, imm8" xed="VFIXUPIMMPS_YMMf32_MASKmskw_YMMf32_YMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_fixupimm_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="__m256" varname="b" etype="FP32"/>
+ <parameter type="__m256i" varname="c" etype="UI32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Fix up packed single-precision (32-bit) floating-point elements in "a" and "b" using packed 32-bit integers in "c", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). "imm8" is used to set the required flags reporting.</description>
+ <operation>enum TOKEN_TYPE {
+ QNAN_TOKEN := 0, \
+ SNAN_TOKEN := 1, \
+ ZERO_VALUE_TOKEN := 2, \
+ ONE_VALUE_TOKEN := 3, \
+ NEG_INF_TOKEN := 4, \
+ POS_INF_TOKEN := 5, \
+ NEG_VALUE_TOKEN := 6, \
+ POS_VALUE_TOKEN := 7
+}
+DEFINE FIXUPIMMPD(src1[31:0], src2[31:0], src3[31:0], imm8[7:0]) {
+ tsrc[31:0] := ((src2[30:23] == 0) AND (MXCSR.DAZ == 1)) ? 0.0 : src2[31:0]
+ CASE(tsrc[31:0]) OF
+ QNAN_TOKEN:j := 0
+ SNAN_TOKEN:j := 1
+ ZERO_VALUE_TOKEN: j := 2
+ ONE_VALUE_TOKEN: j := 3
+ NEG_INF_TOKEN: j := 4
+ POS_INF_TOKEN: j := 5
+ NEG_VALUE_TOKEN: j := 6
+ POS_VALUE_TOKEN: j := 7
+ ESAC
+
+ token_response[3:0] := src3[3+4*j:4*j]
+
+ CASE(token_response[3:0]) OF
+ 0 : dest[31:0] := src1[31:0]
+ 1 : dest[31:0] := tsrc[31:0]
+ 2 : dest[31:0] := QNaN(tsrc[31:0])
+ 3 : dest[31:0] := QNAN_Indefinite
+ 4 : dest[31:0] := -INF
+ 5 : dest[31:0] := +INF
+ 6 : dest[31:0] := tsrc.sign? -INF : +INF
+ 7 : dest[31:0] := -0
+ 8 : dest[31:0] := +0
+ 9 : dest[31:0] := -1
+ 10: dest[31:0] := +1
+ 11: dest[31:0] := 1/2
+ 12: dest[31:0] := 90.0
+ 13: dest[31:0] := PI/2
+ 14: dest[31:0] := MAX_FLOAT
+ 15: dest[31:0] := -MAX_FLOAT
+ ESAC
+
+ CASE(tsrc[31:0]) OF
+ ZERO_VALUE_TOKEN:
+ IF (imm8[0]) #ZE; FI
+ ZERO_VALUE_TOKEN:
+ IF (imm8[1]) #IE; FI
+ ONE_VALUE_TOKEN:
+ IF (imm8[2]) #ZE; FI
+ ONE_VALUE_TOKEN:
+ IF (imm8[3]) #IE; FI
+ SNAN_TOKEN:
+ IF (imm8[4]) #IE; FI
+ NEG_INF_TOKEN:
+ IF (imm8[5]) #IE; FI
+ NEG_VALUE_TOKEN:
+ IF (imm8[6]) #IE; FI
+ POS_INF_TOKEN:
+ IF (imm8[7]) #IE; FI
+ ESAC
+ RETURN dest[31:0]
+}
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := FIXUPIMMPD(a[i+31:i], b[i+31:i], c[i+31:i], imm8[7:0])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VFIXUPIMMPS" form="ymm {z}, ymm, ymm, imm8" xed="VFIXUPIMMPS_YMMf32_MASKmskw_YMMf32_YMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_fixupimm_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="__m128i" varname="c" etype="UI32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Fix up packed single-precision (32-bit) floating-point elements in "a" and "b" using packed 32-bit integers in "c", and store the results in "dst". "imm8" is used to set the required flags reporting.</description>
+ <operation>enum TOKEN_TYPE {
+ QNAN_TOKEN := 0, \
+ SNAN_TOKEN := 1, \
+ ZERO_VALUE_TOKEN := 2, \
+ ONE_VALUE_TOKEN := 3, \
+ NEG_INF_TOKEN := 4, \
+ POS_INF_TOKEN := 5, \
+ NEG_VALUE_TOKEN := 6, \
+ POS_VALUE_TOKEN := 7
+}
+DEFINE FIXUPIMMPD(src1[31:0], src2[31:0], src3[31:0], imm8[7:0]) {
+ tsrc[31:0] := ((src2[30:23] == 0) AND (MXCSR.DAZ == 1)) ? 0.0 : src2[31:0]
+ CASE(tsrc[31:0]) OF
+ QNAN_TOKEN:j := 0
+ SNAN_TOKEN:j := 1
+ ZERO_VALUE_TOKEN: j := 2
+ ONE_VALUE_TOKEN: j := 3
+ NEG_INF_TOKEN: j := 4
+ POS_INF_TOKEN: j := 5
+ NEG_VALUE_TOKEN: j := 6
+ POS_VALUE_TOKEN: j := 7
+ ESAC
+
+ token_response[3:0] := src3[3+4*j:4*j]
+
+ CASE(token_response[3:0]) OF
+ 0 : dest[31:0] := src1[31:0]
+ 1 : dest[31:0] := tsrc[31:0]
+ 2 : dest[31:0] := QNaN(tsrc[31:0])
+ 3 : dest[31:0] := QNAN_Indefinite
+ 4 : dest[31:0] := -INF
+ 5 : dest[31:0] := +INF
+ 6 : dest[31:0] := tsrc.sign? -INF : +INF
+ 7 : dest[31:0] := -0
+ 8 : dest[31:0] := +0
+ 9 : dest[31:0] := -1
+ 10: dest[31:0] := +1
+ 11: dest[31:0] := 1/2
+ 12: dest[31:0] := 90.0
+ 13: dest[31:0] := PI/2
+ 14: dest[31:0] := MAX_FLOAT
+ 15: dest[31:0] := -MAX_FLOAT
+ ESAC
+
+ CASE(tsrc[31:0]) OF
+ ZERO_VALUE_TOKEN:
+ IF (imm8[0]) #ZE; FI
+ ZERO_VALUE_TOKEN:
+ IF (imm8[1]) #IE; FI
+ ONE_VALUE_TOKEN:
+ IF (imm8[2]) #ZE; FI
+ ONE_VALUE_TOKEN:
+ IF (imm8[3]) #IE; FI
+ SNAN_TOKEN:
+ IF (imm8[4]) #IE; FI
+ NEG_INF_TOKEN:
+ IF (imm8[5]) #IE; FI
+ NEG_VALUE_TOKEN:
+ IF (imm8[6]) #IE; FI
+ POS_INF_TOKEN:
+ IF (imm8[7]) #IE; FI
+ ESAC
+ RETURN dest[31:0]
+}
+FOR j := 0 to 3
+ i := j*32
+ dst[i+31:i] := FIXUPIMMPD(a[i+31:i], b[i+31:i], c[i+31:i], imm8[7:0])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VFIXUPIMMPS" form="xmm, xmm, xmm, imm8" xed="VFIXUPIMMPS_XMMf32_MASKmskw_XMMf32_XMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_fixupimm_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="__m128i" varname="c" etype="UI32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Fix up packed single-precision (32-bit) floating-point elements in "a" and "b" using packed 32-bit integers in "c", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). "imm8" is used to set the required flags reporting.</description>
+ <operation>enum TOKEN_TYPE {
+ QNAN_TOKEN := 0, \
+ SNAN_TOKEN := 1, \
+ ZERO_VALUE_TOKEN := 2, \
+ ONE_VALUE_TOKEN := 3, \
+ NEG_INF_TOKEN := 4, \
+ POS_INF_TOKEN := 5, \
+ NEG_VALUE_TOKEN := 6, \
+ POS_VALUE_TOKEN := 7
+}
+DEFINE FIXUPIMMPD(src1[31:0], src2[31:0], src3[31:0], imm8[7:0]) {
+ tsrc[31:0] := ((src2[30:23] == 0) AND (MXCSR.DAZ == 1)) ? 0.0 : src2[31:0]
+ CASE(tsrc[31:0]) OF
+ QNAN_TOKEN:j := 0
+ SNAN_TOKEN:j := 1
+ ZERO_VALUE_TOKEN: j := 2
+ ONE_VALUE_TOKEN: j := 3
+ NEG_INF_TOKEN: j := 4
+ POS_INF_TOKEN: j := 5
+ NEG_VALUE_TOKEN: j := 6
+ POS_VALUE_TOKEN: j := 7
+ ESAC
+
+ token_response[3:0] := src3[3+4*j:4*j]
+
+ CASE(token_response[3:0]) OF
+ 0 : dest[31:0] := src1[31:0]
+ 1 : dest[31:0] := tsrc[31:0]
+ 2 : dest[31:0] := QNaN(tsrc[31:0])
+ 3 : dest[31:0] := QNAN_Indefinite
+ 4 : dest[31:0] := -INF
+ 5 : dest[31:0] := +INF
+ 6 : dest[31:0] := tsrc.sign? -INF : +INF
+ 7 : dest[31:0] := -0
+ 8 : dest[31:0] := +0
+ 9 : dest[31:0] := -1
+ 10: dest[31:0] := +1
+ 11: dest[31:0] := 1/2
+ 12: dest[31:0] := 90.0
+ 13: dest[31:0] := PI/2
+ 14: dest[31:0] := MAX_FLOAT
+ 15: dest[31:0] := -MAX_FLOAT
+ ESAC
+
+ CASE(tsrc[31:0]) OF
+ ZERO_VALUE_TOKEN:
+ IF (imm8[0]) #ZE; FI
+ ZERO_VALUE_TOKEN:
+ IF (imm8[1]) #IE; FI
+ ONE_VALUE_TOKEN:
+ IF (imm8[2]) #ZE; FI
+ ONE_VALUE_TOKEN:
+ IF (imm8[3]) #IE; FI
+ SNAN_TOKEN:
+ IF (imm8[4]) #IE; FI
+ NEG_INF_TOKEN:
+ IF (imm8[5]) #IE; FI
+ NEG_VALUE_TOKEN:
+ IF (imm8[6]) #IE; FI
+ POS_INF_TOKEN:
+ IF (imm8[7]) #IE; FI
+ ESAC
+ RETURN dest[31:0]
+}
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := FIXUPIMMPD(a[i+31:i], b[i+31:i], c[i+31:i], imm8[7:0])
+ ELSE
+ dst[i+31:i] := a[i+31:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VFIXUPIMMPS" form="xmm {k}, xmm, xmm, imm8" xed="VFIXUPIMMPS_XMMf32_MASKmskw_XMMf32_XMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_fixupimm_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="__m128i" varname="c" etype="UI32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Fix up packed single-precision (32-bit) floating-point elements in "a" and "b" using packed 32-bit integers in "c", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). "imm8" is used to set the required flags reporting.</description>
+ <operation>enum TOKEN_TYPE {
+ QNAN_TOKEN := 0, \
+ SNAN_TOKEN := 1, \
+ ZERO_VALUE_TOKEN := 2, \
+ ONE_VALUE_TOKEN := 3, \
+ NEG_INF_TOKEN := 4, \
+ POS_INF_TOKEN := 5, \
+ NEG_VALUE_TOKEN := 6, \
+ POS_VALUE_TOKEN := 7
+}
+DEFINE FIXUPIMMPD(src1[31:0], src2[31:0], src3[31:0], imm8[7:0]) {
+ tsrc[31:0] := ((src2[30:23] == 0) AND (MXCSR.DAZ == 1)) ? 0.0 : src2[31:0]
+ CASE(tsrc[31:0]) OF
+ QNAN_TOKEN:j := 0
+ SNAN_TOKEN:j := 1
+ ZERO_VALUE_TOKEN: j := 2
+ ONE_VALUE_TOKEN: j := 3
+ NEG_INF_TOKEN: j := 4
+ POS_INF_TOKEN: j := 5
+ NEG_VALUE_TOKEN: j := 6
+ POS_VALUE_TOKEN: j := 7
+ ESAC
+
+ token_response[3:0] := src3[3+4*j:4*j]
+
+ CASE(token_response[3:0]) OF
+ 0 : dest[31:0] := src1[31:0]
+ 1 : dest[31:0] := tsrc[31:0]
+ 2 : dest[31:0] := QNaN(tsrc[31:0])
+ 3 : dest[31:0] := QNAN_Indefinite
+ 4 : dest[31:0] := -INF
+ 5 : dest[31:0] := +INF
+ 6 : dest[31:0] := tsrc.sign? -INF : +INF
+ 7 : dest[31:0] := -0
+ 8 : dest[31:0] := +0
+ 9 : dest[31:0] := -1
+ 10: dest[31:0] := +1
+ 11: dest[31:0] := 1/2
+ 12: dest[31:0] := 90.0
+ 13: dest[31:0] := PI/2
+ 14: dest[31:0] := MAX_FLOAT
+ 15: dest[31:0] := -MAX_FLOAT
+ ESAC
+
+ CASE(tsrc[31:0]) OF
+ ZERO_VALUE_TOKEN:
+ IF (imm8[0]) #ZE; FI
+ ZERO_VALUE_TOKEN:
+ IF (imm8[1]) #IE; FI
+ ONE_VALUE_TOKEN:
+ IF (imm8[2]) #ZE; FI
+ ONE_VALUE_TOKEN:
+ IF (imm8[3]) #IE; FI
+ SNAN_TOKEN:
+ IF (imm8[4]) #IE; FI
+ NEG_INF_TOKEN:
+ IF (imm8[5]) #IE; FI
+ NEG_VALUE_TOKEN:
+ IF (imm8[6]) #IE; FI
+ POS_INF_TOKEN:
+ IF (imm8[7]) #IE; FI
+ ESAC
+ RETURN dest[31:0]
+}
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := FIXUPIMMPD(a[i+31:i], b[i+31:i], c[i+31:i], imm8[7:0])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VFIXUPIMMPS" form="xmm {z}, xmm, xmm, imm8" xed="VFIXUPIMMPS_XMMf32_MASKmskw_XMMf32_XMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask3_fmadd_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <parameter type="__m256d" varname="b" etype="FP64"/>
+ <parameter type="__m256d" varname="c" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <description>Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", add the intermediate result to packed elements in "c", and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i]
+ ELSE
+ dst[i+63:i] := c[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VFMADD132PD" form="ymm {k}, ymm, ymm" xed="VFMADD132PD_YMMf64_MASKmskw_YMMf64_YMMf64_AVX512"/>
+ <instruction name="VFMADD213PD" form="ymm {k}, ymm, ymm" xed="VFMADD213PD_YMMf64_MASKmskw_YMMf64_YMMf64_AVX512"/>
+ <instruction name="VFMADD231PD" form="ymm {k}, ymm, ymm" xed="VFMADD231PD_YMMf64_MASKmskw_YMMf64_YMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_fmadd_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256d" varname="b" etype="FP64"/>
+ <parameter type="__m256d" varname="c" etype="FP64"/>
+ <description>Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", add the intermediate result to packed elements in "c", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i]
+ ELSE
+ dst[i+63:i] := a[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VFMADD132PD" form="ymm {k}, ymm, ymm" xed="VFMADD132PD_YMMf64_MASKmskw_YMMf64_YMMf64_AVX512"/>
+ <instruction name="VFMADD213PD" form="ymm {k}, ymm, ymm" xed="VFMADD213PD_YMMf64_MASKmskw_YMMf64_YMMf64_AVX512"/>
+ <instruction name="VFMADD231PD" form="ymm {k}, ymm, ymm" xed="VFMADD231PD_YMMf64_MASKmskw_YMMf64_YMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_fmadd_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <parameter type="__m256d" varname="b" etype="FP64"/>
+ <parameter type="__m256d" varname="c" etype="FP64"/>
+ <description>Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", add the intermediate result to packed elements in "c", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VFMADD132PD" form="ymm {z}, ymm, ymm" xed="VFMADD132PD_YMMf64_MASKmskw_YMMf64_YMMf64_AVX512"/>
+ <instruction name="VFMADD213PD" form="ymm {z}, ymm, ymm" xed="VFMADD213PD_YMMf64_MASKmskw_YMMf64_YMMf64_AVX512"/>
+ <instruction name="VFMADD231PD" form="ymm {z}, ymm, ymm" xed="VFMADD231PD_YMMf64_MASKmskw_YMMf64_YMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask3_fmadd_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="__m128d" varname="c" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <description>Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", add the intermediate result to packed elements in "c", and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i]
+ ELSE
+ dst[i+63:i] := c[i+63:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VFMADD132PD" form="xmm {k}, xmm, xmm" xed="VFMADD132PD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <instruction name="VFMADD213PD" form="xmm {k}, xmm, xmm" xed="VFMADD213PD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <instruction name="VFMADD231PD" form="xmm {k}, xmm, xmm" xed="VFMADD231PD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_fmadd_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="__m128d" varname="c" etype="FP64"/>
+ <description>Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", add the intermediate result to packed elements in "c", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i]
+ ELSE
+ dst[i+63:i] := a[i+63:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VFMADD132PD" form="xmm {k}, xmm, xmm" xed="VFMADD132PD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <instruction name="VFMADD213PD" form="xmm {k}, xmm, xmm" xed="VFMADD213PD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <instruction name="VFMADD231PD" form="xmm {k}, xmm, xmm" xed="VFMADD231PD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_fmadd_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="__m128d" varname="c" etype="FP64"/>
+ <description>Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", add the intermediate result to packed elements in "c", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VFMADD132PD" form="xmm {z}, xmm, xmm" xed="VFMADD132PD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <instruction name="VFMADD213PD" form="xmm {z}, xmm, xmm" xed="VFMADD213PD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <instruction name="VFMADD231PD" form="xmm {z}, xmm, xmm" xed="VFMADD231PD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask3_fmadd_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="__m256" varname="b" etype="FP32"/>
+ <parameter type="__m256" varname="c" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <description>Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", add the intermediate result to packed elements in "c", and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i]
+ ELSE
+ dst[i+31:i] := c[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VFMADD132PS" form="ymm {k}, ymm, ymm" xed="VFMADD132PS_YMMf32_MASKmskw_YMMf32_YMMf32_AVX512"/>
+ <instruction name="VFMADD213PS" form="ymm {k}, ymm, ymm" xed="VFMADD213PS_YMMf32_MASKmskw_YMMf32_YMMf32_AVX512"/>
+ <instruction name="VFMADD231PS" form="ymm {k}, ymm, ymm" xed="VFMADD231PS_YMMf32_MASKmskw_YMMf32_YMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_fmadd_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256" varname="b" etype="FP32"/>
+ <parameter type="__m256" varname="c" etype="FP32"/>
+ <description>Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", add the intermediate result to packed elements in "c", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i]
+ ELSE
+ dst[i+31:i] := a[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VFMADD132PS" form="ymm {k}, ymm, ymm" xed="VFMADD132PS_YMMf32_MASKmskw_YMMf32_YMMf32_AVX512"/>
+ <instruction name="VFMADD213PS" form="ymm {k}, ymm, ymm" xed="VFMADD213PS_YMMf32_MASKmskw_YMMf32_YMMf32_AVX512"/>
+ <instruction name="VFMADD231PS" form="ymm {k}, ymm, ymm" xed="VFMADD231PS_YMMf32_MASKmskw_YMMf32_YMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_fmadd_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="__m256" varname="b" etype="FP32"/>
+ <parameter type="__m256" varname="c" etype="FP32"/>
+ <description>Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", add the intermediate result to packed elements in "c", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VFMADD132PS" form="ymm {z}, ymm, ymm" xed="VFMADD132PS_YMMf32_MASKmskw_YMMf32_YMMf32_AVX512"/>
+ <instruction name="VFMADD213PS" form="ymm {z}, ymm, ymm" xed="VFMADD213PS_YMMf32_MASKmskw_YMMf32_YMMf32_AVX512"/>
+ <instruction name="VFMADD231PS" form="ymm {z}, ymm, ymm" xed="VFMADD231PS_YMMf32_MASKmskw_YMMf32_YMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask3_fmadd_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="__m128" varname="c" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <description>Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", add the intermediate result to packed elements in "c", and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i]
+ ELSE
+ dst[i+31:i] := c[i+31:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VFMADD132PS" form="xmm {k}, xmm, xmm" xed="VFMADD132PS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <instruction name="VFMADD213PS" form="xmm {k}, xmm, xmm" xed="VFMADD213PS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <instruction name="VFMADD231PS" form="xmm {k}, xmm, xmm" xed="VFMADD231PS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_fmadd_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="__m128" varname="c" etype="FP32"/>
+ <description>Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", add the intermediate result to packed elements in "c", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i]
+ ELSE
+ dst[i+31:i] := a[i+31:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VFMADD132PS" form="xmm {k}, xmm, xmm" xed="VFMADD132PS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <instruction name="VFMADD213PS" form="xmm {k}, xmm, xmm" xed="VFMADD213PS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <instruction name="VFMADD231PS" form="xmm {k}, xmm, xmm" xed="VFMADD231PS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_fmadd_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="__m128" varname="c" etype="FP32"/>
+ <description>Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", add the intermediate result to packed elements in "c", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VFMADD132PS" form="xmm {z}, xmm, xmm" xed="VFMADD132PS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <instruction name="VFMADD213PS" form="xmm {z}, xmm, xmm" xed="VFMADD213PS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <instruction name="VFMADD231PS" form="xmm {z}, xmm, xmm" xed="VFMADD231PS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask3_fmaddsub_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <parameter type="__m256d" varname="b" etype="FP64"/>
+ <parameter type="__m256d" varname="c" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <description>Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", alternatively add and subtract packed elements in "c" to/from the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ IF ((j &amp; 1) == 0)
+ dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i]
+ ELSE
+ dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i]
+ FI
+ ELSE
+ dst[i+63:i] := c[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VFMADDSUB132PD" form="ymm {k}, ymm, ymm" xed="VFMADDSUB132PD_YMMf64_MASKmskw_YMMf64_YMMf64_AVX512"/>
+ <instruction name="VFMADDSUB213PD" form="ymm {k}, ymm, ymm" xed="VFMADDSUB213PD_YMMf64_MASKmskw_YMMf64_YMMf64_AVX512"/>
+ <instruction name="VFMADDSUB231PD" form="ymm {k}, ymm, ymm" xed="VFMADDSUB231PD_YMMf64_MASKmskw_YMMf64_YMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_fmaddsub_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256d" varname="b" etype="FP64"/>
+ <parameter type="__m256d" varname="c" etype="FP64"/>
+ <description>Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", alternatively add and subtract packed elements in "c" to/from the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ IF ((j &amp; 1) == 0)
+ dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i]
+ ELSE
+ dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i]
+ FI
+ ELSE
+ dst[i+63:i] := a[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VFMADDSUB132PD" form="ymm {k}, ymm, ymm" xed="VFMADDSUB132PD_YMMf64_MASKmskw_YMMf64_YMMf64_AVX512"/>
+ <instruction name="VFMADDSUB213PD" form="ymm {k}, ymm, ymm" xed="VFMADDSUB213PD_YMMf64_MASKmskw_YMMf64_YMMf64_AVX512"/>
+ <instruction name="VFMADDSUB231PD" form="ymm {k}, ymm, ymm" xed="VFMADDSUB231PD_YMMf64_MASKmskw_YMMf64_YMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_fmaddsub_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <parameter type="__m256d" varname="b" etype="FP64"/>
+ <parameter type="__m256d" varname="c" etype="FP64"/>
+ <description>Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", alternatively add and subtract packed elements in "c" to/from the intermediate result, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ IF ((j &amp; 1) == 0)
+ dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i]
+ ELSE
+ dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i]
+ FI
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VFMADDSUB132PD" form="ymm {z}, ymm, ymm" xed="VFMADDSUB132PD_YMMf64_MASKmskw_YMMf64_YMMf64_AVX512"/>
+ <instruction name="VFMADDSUB213PD" form="ymm {z}, ymm, ymm" xed="VFMADDSUB213PD_YMMf64_MASKmskw_YMMf64_YMMf64_AVX512"/>
+ <instruction name="VFMADDSUB231PD" form="ymm {z}, ymm, ymm" xed="VFMADDSUB231PD_YMMf64_MASKmskw_YMMf64_YMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask3_fmaddsub_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="__m128d" varname="c" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <description>Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", alternatively add and subtract packed elements in "c" to/from the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ IF ((j &amp; 1) == 0)
+ dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i]
+ ELSE
+ dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i]
+ FI
+ ELSE
+ dst[i+63:i] := c[i+63:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VFMADDSUB132PD" form="xmm {k}, xmm, xmm" xed="VFMADDSUB132PD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <instruction name="VFMADDSUB213PD" form="xmm {k}, xmm, xmm" xed="VFMADDSUB213PD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <instruction name="VFMADDSUB231PD" form="xmm {k}, xmm, xmm" xed="VFMADDSUB231PD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_fmaddsub_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="__m128d" varname="c" etype="FP64"/>
+ <description>Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", alternatively add and subtract packed elements in "c" to/from the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ IF ((j &amp; 1) == 0)
+ dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i]
+ ELSE
+ dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i]
+ FI
+ ELSE
+ dst[i+63:i] := a[i+63:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VFMADDSUB132PD" form="xmm {k}, xmm, xmm" xed="VFMADDSUB132PD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <instruction name="VFMADDSUB213PD" form="xmm {k}, xmm, xmm" xed="VFMADDSUB213PD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <instruction name="VFMADDSUB231PD" form="xmm {k}, xmm, xmm" xed="VFMADDSUB231PD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_fmaddsub_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="__m128d" varname="c" etype="FP64"/>
+ <description>Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", alternatively add and subtract packed elements in "c" to/from the intermediate result, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ IF ((j &amp; 1) == 0)
+ dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i]
+ ELSE
+ dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i]
+ FI
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VFMADDSUB132PD" form="xmm {z}, xmm, xmm" xed="VFMADDSUB132PD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <instruction name="VFMADDSUB213PD" form="xmm {z}, xmm, xmm" xed="VFMADDSUB213PD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <instruction name="VFMADDSUB231PD" form="xmm {z}, xmm, xmm" xed="VFMADDSUB231PD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask3_fmaddsub_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="__m256" varname="b" etype="FP32"/>
+ <parameter type="__m256" varname="c" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <description>Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", alternatively add and subtract packed elements in "c" to/from the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ IF ((j &amp; 1) == 0)
+ dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i]
+ ELSE
+ dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i]
+ FI
+ ELSE
+ dst[i+31:i] := c[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VFMADDSUB132PS" form="ymm {k}, ymm, ymm" xed="VFMADDSUB132PS_YMMf32_MASKmskw_YMMf32_YMMf32_AVX512"/>
+ <instruction name="VFMADDSUB213PS" form="ymm {k}, ymm, ymm" xed="VFMADDSUB213PS_YMMf32_MASKmskw_YMMf32_YMMf32_AVX512"/>
+ <instruction name="VFMADDSUB231PS" form="ymm {k}, ymm, ymm" xed="VFMADDSUB231PS_YMMf32_MASKmskw_YMMf32_YMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_fmaddsub_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256" varname="b" etype="FP32"/>
+ <parameter type="__m256" varname="c" etype="FP32"/>
+ <description>Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", alternatively add and subtract packed elements in "c" to/from the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ IF ((j &amp; 1) == 0)
+ dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i]
+ ELSE
+ dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i]
+ FI
+ ELSE
+ dst[i+31:i] := a[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VFMADDSUB132PS" form="ymm {k}, ymm, ymm" xed="VFMADDSUB132PS_YMMf32_MASKmskw_YMMf32_YMMf32_AVX512"/>
+ <instruction name="VFMADDSUB213PS" form="ymm {k}, ymm, ymm" xed="VFMADDSUB213PS_YMMf32_MASKmskw_YMMf32_YMMf32_AVX512"/>
+ <instruction name="VFMADDSUB231PS" form="ymm {k}, ymm, ymm" xed="VFMADDSUB231PS_YMMf32_MASKmskw_YMMf32_YMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_fmaddsub_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="__m256" varname="b" etype="FP32"/>
+ <parameter type="__m256" varname="c" etype="FP32"/>
+ <description>Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", alternatively add and subtract packed elements in "c" to/from the intermediate result, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ IF ((j &amp; 1) == 0)
+ dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i]
+ ELSE
+ dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i]
+ FI
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VFMADDSUB132PS" form="ymm {z}, ymm, ymm" xed="VFMADDSUB132PS_YMMf32_MASKmskw_YMMf32_YMMf32_AVX512"/>
+ <instruction name="VFMADDSUB213PS" form="ymm {z}, ymm, ymm" xed="VFMADDSUB213PS_YMMf32_MASKmskw_YMMf32_YMMf32_AVX512"/>
+ <instruction name="VFMADDSUB231PS" form="ymm {z}, ymm, ymm" xed="VFMADDSUB231PS_YMMf32_MASKmskw_YMMf32_YMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask3_fmaddsub_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="__m128" varname="c" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <description>Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", alternatively add and subtract packed elements in "c" to/from the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ IF ((j &amp; 1) == 0)
+ dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i]
+ ELSE
+ dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i]
+ FI
+ ELSE
+ dst[i+31:i] := c[i+31:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VFMADDSUB132PS" form="xmm {k}, xmm, xmm" xed="VFMADDSUB132PS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <instruction name="VFMADDSUB213PS" form="xmm {k}, xmm, xmm" xed="VFMADDSUB213PS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <instruction name="VFMADDSUB231PS" form="xmm {k}, xmm, xmm" xed="VFMADDSUB231PS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_fmaddsub_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="__m128" varname="c" etype="FP32"/>
+ <description>Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", alternatively add and subtract packed elements in "c" to/from the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ IF ((j &amp; 1) == 0)
+ dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i]
+ ELSE
+ dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i]
+ FI
+ ELSE
+ dst[i+31:i] := a[i+31:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VFMADDSUB132PS" form="xmm {k}, xmm, xmm" xed="VFMADDSUB132PS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <instruction name="VFMADDSUB213PS" form="xmm {k}, xmm, xmm" xed="VFMADDSUB213PS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <instruction name="VFMADDSUB231PS" form="xmm {k}, xmm, xmm" xed="VFMADDSUB231PS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_fmaddsub_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="__m128" varname="c" etype="FP32"/>
+ <description>Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", alternatively add and subtract packed elements in "c" to/from the intermediate result, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ IF ((j &amp; 1) == 0)
+ dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i]
+ ELSE
+ dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i]
+ FI
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VFMADDSUB132PS" form="xmm {z}, xmm, xmm" xed="VFMADDSUB132PS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <instruction name="VFMADDSUB213PS" form="xmm {z}, xmm, xmm" xed="VFMADDSUB213PS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <instruction name="VFMADDSUB231PS" form="xmm {z}, xmm, xmm" xed="VFMADDSUB231PS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask3_fmsub_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <parameter type="__m256d" varname="b" etype="FP64"/>
+ <parameter type="__m256d" varname="c" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <description>Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i]
+ ELSE
+ dst[i+63:i] := c[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VFMSUB132PD" form="ymm {k}, ymm, ymm" xed="VFMSUB132PD_YMMf64_MASKmskw_YMMf64_YMMf64_AVX512"/>
+ <instruction name="VFMSUB213PD" form="ymm {k}, ymm, ymm" xed="VFMSUB213PD_YMMf64_MASKmskw_YMMf64_YMMf64_AVX512"/>
+ <instruction name="VFMSUB231PD" form="ymm {k}, ymm, ymm" xed="VFMSUB231PD_YMMf64_MASKmskw_YMMf64_YMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_fmsub_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256d" varname="b" etype="FP64"/>
+ <parameter type="__m256d" varname="c" etype="FP64"/>
+ <description>Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i]
+ ELSE
+ dst[i+63:i] := a[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VFMSUB132PD" form="ymm {k}, ymm, ymm" xed="VFMSUB132PD_YMMf64_MASKmskw_YMMf64_YMMf64_AVX512"/>
+ <instruction name="VFMSUB213PD" form="ymm {k}, ymm, ymm" xed="VFMSUB213PD_YMMf64_MASKmskw_YMMf64_YMMf64_AVX512"/>
+ <instruction name="VFMSUB231PD" form="ymm {k}, ymm, ymm" xed="VFMSUB231PD_YMMf64_MASKmskw_YMMf64_YMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_fmsub_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <parameter type="__m256d" varname="b" etype="FP64"/>
+ <parameter type="__m256d" varname="c" etype="FP64"/>
+ <description>Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the intermediate result, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VFMSUB132PD" form="ymm {z}, ymm, ymm" xed="VFMSUB132PD_YMMf64_MASKmskw_YMMf64_YMMf64_AVX512"/>
+ <instruction name="VFMSUB213PD" form="ymm {z}, ymm, ymm" xed="VFMSUB213PD_YMMf64_MASKmskw_YMMf64_YMMf64_AVX512"/>
+ <instruction name="VFMSUB231PD" form="ymm {z}, ymm, ymm" xed="VFMSUB231PD_YMMf64_MASKmskw_YMMf64_YMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask3_fmsub_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="__m128d" varname="c" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <description>Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i]
+ ELSE
+ dst[i+63:i] := c[i+63:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VFMSUB132PD" form="xmm {k}, xmm, xmm" xed="VFMSUB132PD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <instruction name="VFMSUB213PD" form="xmm {k}, xmm, xmm" xed="VFMSUB213PD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <instruction name="VFMSUB231PD" form="xmm {k}, xmm, xmm" xed="VFMSUB231PD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_fmsub_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="__m128d" varname="c" etype="FP64"/>
+ <description>Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i]
+ ELSE
+ dst[i+63:i] := a[i+63:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VFMSUB132PD" form="xmm {k}, xmm, xmm" xed="VFMSUB132PD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <instruction name="VFMSUB213PD" form="xmm {k}, xmm, xmm" xed="VFMSUB213PD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <instruction name="VFMSUB231PD" form="xmm {k}, xmm, xmm" xed="VFMSUB231PD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_fmsub_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="__m128d" varname="c" etype="FP64"/>
+ <description>Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the intermediate result, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VFMSUB132PD" form="xmm {z}, xmm, xmm" xed="VFMSUB132PD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <instruction name="VFMSUB213PD" form="xmm {z}, xmm, xmm" xed="VFMSUB213PD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <instruction name="VFMSUB231PD" form="xmm {z}, xmm, xmm" xed="VFMSUB231PD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask3_fmsub_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="__m256" varname="b" etype="FP32"/>
+ <parameter type="__m256" varname="c" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <description>Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i]
+ ELSE
+ dst[i+31:i] := c[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VFMSUB132PS" form="ymm {k}, ymm, ymm" xed="VFMSUB132PS_YMMf32_MASKmskw_YMMf32_YMMf32_AVX512"/>
+ <instruction name="VFMSUB213PS" form="ymm {k}, ymm, ymm" xed="VFMSUB213PS_YMMf32_MASKmskw_YMMf32_YMMf32_AVX512"/>
+ <instruction name="VFMSUB231PS" form="ymm {k}, ymm, ymm" xed="VFMSUB231PS_YMMf32_MASKmskw_YMMf32_YMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_fmsub_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256" varname="b" etype="FP32"/>
+ <parameter type="__m256" varname="c" etype="FP32"/>
+ <description>Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i]
+ ELSE
+ dst[i+31:i] := a[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VFMSUB132PS" form="ymm {k}, ymm, ymm" xed="VFMSUB132PS_YMMf32_MASKmskw_YMMf32_YMMf32_AVX512"/>
+ <instruction name="VFMSUB213PS" form="ymm {k}, ymm, ymm" xed="VFMSUB213PS_YMMf32_MASKmskw_YMMf32_YMMf32_AVX512"/>
+ <instruction name="VFMSUB231PS" form="ymm {k}, ymm, ymm" xed="VFMSUB231PS_YMMf32_MASKmskw_YMMf32_YMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_fmsub_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="__m256" varname="b" etype="FP32"/>
+ <parameter type="__m256" varname="c" etype="FP32"/>
+ <description>Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the intermediate result, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VFMSUB132PS" form="ymm {z}, ymm, ymm" xed="VFMSUB132PS_YMMf32_MASKmskw_YMMf32_YMMf32_AVX512"/>
+ <instruction name="VFMSUB213PS" form="ymm {z}, ymm, ymm" xed="VFMSUB213PS_YMMf32_MASKmskw_YMMf32_YMMf32_AVX512"/>
+ <instruction name="VFMSUB231PS" form="ymm {z}, ymm, ymm" xed="VFMSUB231PS_YMMf32_MASKmskw_YMMf32_YMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask3_fmsub_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="__m128" varname="c" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <description>Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i]
+ ELSE
+ dst[i+31:i] := c[i+31:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VFMSUB132PS" form="xmm {k}, xmm, xmm" xed="VFMSUB132PS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <instruction name="VFMSUB213PS" form="xmm {k}, xmm, xmm" xed="VFMSUB213PS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <instruction name="VFMSUB231PS" form="xmm {k}, xmm, xmm" xed="VFMSUB231PS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_fmsub_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="__m128" varname="c" etype="FP32"/>
+ <description>Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i]
+ ELSE
+ dst[i+31:i] := a[i+31:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VFMSUB132PS" form="xmm {k}, xmm, xmm" xed="VFMSUB132PS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <instruction name="VFMSUB213PS" form="xmm {k}, xmm, xmm" xed="VFMSUB213PS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <instruction name="VFMSUB231PS" form="xmm {k}, xmm, xmm" xed="VFMSUB231PS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_fmsub_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="__m128" varname="c" etype="FP32"/>
+ <description>Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the intermediate result, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VFMSUB132PS" form="xmm {z}, xmm, xmm" xed="VFMSUB132PS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <instruction name="VFMSUB213PS" form="xmm {z}, xmm, xmm" xed="VFMSUB213PS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <instruction name="VFMSUB231PS" form="xmm {z}, xmm, xmm" xed="VFMSUB231PS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask3_fmsubadd_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <parameter type="__m256d" varname="b" etype="FP64"/>
+ <parameter type="__m256d" varname="c" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <description>Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", alternatively subtract and add packed elements in "c" from/to the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ IF ((j &amp; 1) == 0)
+ dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i]
+ ELSE
+ dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i]
+ FI
+ ELSE
+ dst[i+63:i] := c[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VFMSUBADD132PD" form="ymm {k}, ymm, ymm" xed="VFMSUBADD132PD_YMMf64_MASKmskw_YMMf64_YMMf64_AVX512"/>
+ <instruction name="VFMSUBADD213PD" form="ymm {k}, ymm, ymm" xed="VFMSUBADD213PD_YMMf64_MASKmskw_YMMf64_YMMf64_AVX512"/>
+ <instruction name="VFMSUBADD231PD" form="ymm {k}, ymm, ymm" xed="VFMSUBADD231PD_YMMf64_MASKmskw_YMMf64_YMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_fmsubadd_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256d" varname="b" etype="FP64"/>
+ <parameter type="__m256d" varname="c" etype="FP64"/>
+ <description>Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", alternatively subtract and add packed elements in "c" from/to the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ IF ((j &amp; 1) == 0)
+ dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i]
+ ELSE
+ dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i]
+ FI
+ ELSE
+ dst[i+63:i] := a[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VFMSUBADD132PD" form="ymm {k}, ymm, ymm" xed="VFMSUBADD132PD_YMMf64_MASKmskw_YMMf64_YMMf64_AVX512"/>
+ <instruction name="VFMSUBADD213PD" form="ymm {k}, ymm, ymm" xed="VFMSUBADD213PD_YMMf64_MASKmskw_YMMf64_YMMf64_AVX512"/>
+ <instruction name="VFMSUBADD231PD" form="ymm {k}, ymm, ymm" xed="VFMSUBADD231PD_YMMf64_MASKmskw_YMMf64_YMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_fmsubadd_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <parameter type="__m256d" varname="b" etype="FP64"/>
+ <parameter type="__m256d" varname="c" etype="FP64"/>
+ <description>Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", alternatively subtract and add packed elements in "c" from/to the intermediate result, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ IF ((j &amp; 1) == 0)
+ dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i]
+ ELSE
+ dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i]
+ FI
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VFMSUBADD132PD" form="ymm {z}, ymm, ymm" xed="VFMSUBADD132PD_YMMf64_MASKmskw_YMMf64_YMMf64_AVX512"/>
+ <instruction name="VFMSUBADD213PD" form="ymm {z}, ymm, ymm" xed="VFMSUBADD213PD_YMMf64_MASKmskw_YMMf64_YMMf64_AVX512"/>
+ <instruction name="VFMSUBADD231PD" form="ymm {z}, ymm, ymm" xed="VFMSUBADD231PD_YMMf64_MASKmskw_YMMf64_YMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask3_fmsubadd_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="__m128d" varname="c" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <description>Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", alternatively subtract and add packed elements in "c" from/to the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ IF ((j &amp; 1) == 0)
+ dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i]
+ ELSE
+ dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i]
+ FI
+ ELSE
+ dst[i+63:i] := c[i+63:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VFMSUBADD132PD" form="xmm {k}, xmm, xmm" xed="VFMSUBADD132PD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <instruction name="VFMSUBADD213PD" form="xmm {k}, xmm, xmm" xed="VFMSUBADD213PD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <instruction name="VFMSUBADD231PD" form="xmm {k}, xmm, xmm" xed="VFMSUBADD231PD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_fmsubadd_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="__m128d" varname="c" etype="FP64"/>
+ <description>Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", alternatively subtract and add packed elements in "c" from/to the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ IF ((j &amp; 1) == 0)
+ dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i]
+ ELSE
+ dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i]
+ FI
+ ELSE
+ dst[i+63:i] := a[i+63:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VFMSUBADD132PD" form="xmm {k}, xmm, xmm" xed="VFMSUBADD132PD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <instruction name="VFMSUBADD213PD" form="xmm {k}, xmm, xmm" xed="VFMSUBADD213PD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <instruction name="VFMSUBADD231PD" form="xmm {k}, xmm, xmm" xed="VFMSUBADD231PD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_fmsubadd_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="__m128d" varname="c" etype="FP64"/>
+ <description>Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", alternatively subtract and add packed elements in "c" from/to the intermediate result, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ IF ((j &amp; 1) == 0)
+ dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i]
+ ELSE
+ dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i]
+ FI
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VFMSUBADD132PD" form="xmm {z}, xmm, xmm" xed="VFMSUBADD132PD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <instruction name="VFMSUBADD213PD" form="xmm {z}, xmm, xmm" xed="VFMSUBADD213PD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <instruction name="VFMSUBADD231PD" form="xmm {z}, xmm, xmm" xed="VFMSUBADD231PD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask3_fmsubadd_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="__m256" varname="b" etype="FP32"/>
+ <parameter type="__m256" varname="c" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <description>Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", alternatively subtract and add packed elements in "c" from/to the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ IF ((j &amp; 1) == 0)
+ dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i]
+ ELSE
+ dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i]
+ FI
+ ELSE
+ dst[i+31:i] := c[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VFMSUBADD132PS" form="ymm {k}, ymm, ymm" xed="VFMSUBADD132PS_YMMf32_MASKmskw_YMMf32_YMMf32_AVX512"/>
+ <instruction name="VFMSUBADD213PS" form="ymm {k}, ymm, ymm" xed="VFMSUBADD213PS_YMMf32_MASKmskw_YMMf32_YMMf32_AVX512"/>
+ <instruction name="VFMSUBADD231PS" form="ymm {k}, ymm, ymm" xed="VFMSUBADD231PS_YMMf32_MASKmskw_YMMf32_YMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_fmsubadd_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256" varname="b" etype="FP32"/>
+ <parameter type="__m256" varname="c" etype="FP32"/>
+ <description>Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", alternatively subtract and add packed elements in "c" from/to the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ IF ((j &amp; 1) == 0)
+ dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i]
+ ELSE
+ dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i]
+ FI
+ ELSE
+ dst[i+31:i] := a[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VFMSUBADD132PS" form="ymm {k}, ymm, ymm" xed="VFMSUBADD132PS_YMMf32_MASKmskw_YMMf32_YMMf32_AVX512"/>
+ <instruction name="VFMSUBADD213PS" form="ymm {k}, ymm, ymm" xed="VFMSUBADD213PS_YMMf32_MASKmskw_YMMf32_YMMf32_AVX512"/>
+ <instruction name="VFMSUBADD231PS" form="ymm {k}, ymm, ymm" xed="VFMSUBADD231PS_YMMf32_MASKmskw_YMMf32_YMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_fmsubadd_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="__m256" varname="b" etype="FP32"/>
+ <parameter type="__m256" varname="c" etype="FP32"/>
+ <description>Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", alternatively subtract and add packed elements in "c" from/to the intermediate result, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ IF ((j &amp; 1) == 0)
+ dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i]
+ ELSE
+ dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i]
+ FI
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VFMSUBADD132PS" form="ymm {z}, ymm, ymm" xed="VFMSUBADD132PS_YMMf32_MASKmskw_YMMf32_YMMf32_AVX512"/>
+ <instruction name="VFMSUBADD213PS" form="ymm {z}, ymm, ymm" xed="VFMSUBADD213PS_YMMf32_MASKmskw_YMMf32_YMMf32_AVX512"/>
+ <instruction name="VFMSUBADD231PS" form="ymm {z}, ymm, ymm" xed="VFMSUBADD231PS_YMMf32_MASKmskw_YMMf32_YMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask3_fmsubadd_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="__m128" varname="c" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <description>Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", alternatively subtract and add packed elements in "c" from/to the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ IF ((j &amp; 1) == 0)
+ dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i]
+ ELSE
+ dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i]
+ FI
+ ELSE
+ dst[i+31:i] := c[i+31:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VFMSUBADD132PS" form="xmm {k}, xmm, xmm" xed="VFMSUBADD132PS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <instruction name="VFMSUBADD213PS" form="xmm {k}, xmm, xmm" xed="VFMSUBADD213PS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <instruction name="VFMSUBADD231PS" form="xmm {k}, xmm, xmm" xed="VFMSUBADD231PS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_fmsubadd_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="__m128" varname="c" etype="FP32"/>
+ <description>Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", alternatively subtract and add packed elements in "c" from/to the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ IF ((j &amp; 1) == 0)
+ dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i]
+ ELSE
+ dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i]
+ FI
+ ELSE
+ dst[i+31:i] := a[i+31:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VFMSUBADD132PS" form="xmm {k}, xmm, xmm" xed="VFMSUBADD132PS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <instruction name="VFMSUBADD213PS" form="xmm {k}, xmm, xmm" xed="VFMSUBADD213PS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <instruction name="VFMSUBADD231PS" form="xmm {k}, xmm, xmm" xed="VFMSUBADD231PS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_fmsubadd_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="__m128" varname="c" etype="FP32"/>
+ <description>Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", alternatively subtract and add packed elements in "c" from/to the intermediate result, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ IF ((j &amp; 1) == 0)
+ dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i]
+ ELSE
+ dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i]
+ FI
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VFMSUBADD132PS" form="xmm {z}, xmm, xmm" xed="VFMSUBADD132PS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <instruction name="VFMSUBADD213PS" form="xmm {z}, xmm, xmm" xed="VFMSUBADD213PS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <instruction name="VFMSUBADD231PS" form="xmm {z}, xmm, xmm" xed="VFMSUBADD231PS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask3_fnmadd_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <parameter type="__m256d" varname="b" etype="FP64"/>
+ <parameter type="__m256d" varname="c" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <description>Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", add the negated intermediate result to packed elements in "c", and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := -(a[i+63:i] * b[i+63:i]) + c[i+63:i]
+ ELSE
+ dst[i+63:i] := c[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VFNMADD132PD" form="ymm {k}, ymm, ymm" xed="VFNMADD132PD_YMMf64_MASKmskw_YMMf64_YMMf64_AVX512"/>
+ <instruction name="VFNMADD213PD" form="ymm {k}, ymm, ymm" xed="VFNMADD213PD_YMMf64_MASKmskw_YMMf64_YMMf64_AVX512"/>
+ <instruction name="VFNMADD231PD" form="ymm {k}, ymm, ymm" xed="VFNMADD231PD_YMMf64_MASKmskw_YMMf64_YMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_fnmadd_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256d" varname="b" etype="FP64"/>
+ <parameter type="__m256d" varname="c" etype="FP64"/>
+ <description>Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", add the negated intermediate result to packed elements in "c", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := -(a[i+63:i] * b[i+63:i]) + c[i+63:i]
+ ELSE
+ dst[i+63:i] := a[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VFNMADD132PD" form="ymm {k}, ymm, ymm" xed="VFNMADD132PD_YMMf64_MASKmskw_YMMf64_YMMf64_AVX512"/>
+ <instruction name="VFNMADD213PD" form="ymm {k}, ymm, ymm" xed="VFNMADD213PD_YMMf64_MASKmskw_YMMf64_YMMf64_AVX512"/>
+ <instruction name="VFNMADD231PD" form="ymm {k}, ymm, ymm" xed="VFNMADD231PD_YMMf64_MASKmskw_YMMf64_YMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_fnmadd_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <parameter type="__m256d" varname="b" etype="FP64"/>
+ <parameter type="__m256d" varname="c" etype="FP64"/>
+ <description>Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", add the negated intermediate result to packed elements in "c", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := -(a[i+63:i] * b[i+63:i]) + c[i+63:i]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VFNMADD132PD" form="ymm {z}, ymm, ymm" xed="VFNMADD132PD_YMMf64_MASKmskw_YMMf64_YMMf64_AVX512"/>
+ <instruction name="VFNMADD213PD" form="ymm {z}, ymm, ymm" xed="VFNMADD213PD_YMMf64_MASKmskw_YMMf64_YMMf64_AVX512"/>
+ <instruction name="VFNMADD231PD" form="ymm {z}, ymm, ymm" xed="VFNMADD231PD_YMMf64_MASKmskw_YMMf64_YMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask3_fnmadd_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="__m128d" varname="c" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <description>Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", add the negated intermediate result to packed elements in "c", and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := -(a[i+63:i] * b[i+63:i]) + c[i+63:i]
+ ELSE
+ dst[i+63:i] := c[i+63:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VFNMADD132PD" form="xmm {k}, xmm, xmm" xed="VFNMADD132PD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <instruction name="VFNMADD213PD" form="xmm {k}, xmm, xmm" xed="VFNMADD213PD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <instruction name="VFNMADD231PD" form="xmm {k}, xmm, xmm" xed="VFNMADD231PD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_fnmadd_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="__m128d" varname="c" etype="FP64"/>
+ <description>Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", add the negated intermediate result to packed elements in "c", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := -(a[i+63:i] * b[i+63:i]) + c[i+63:i]
+ ELSE
+ dst[i+63:i] := a[i+63:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VFNMADD132PD" form="xmm {k}, xmm, xmm" xed="VFNMADD132PD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <instruction name="VFNMADD213PD" form="xmm {k}, xmm, xmm" xed="VFNMADD213PD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <instruction name="VFNMADD231PD" form="xmm {k}, xmm, xmm" xed="VFNMADD231PD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_fnmadd_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="__m128d" varname="c" etype="FP64"/>
+ <description>Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", add the negated intermediate result to packed elements in "c", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := -(a[i+63:i] * b[i+63:i]) + c[i+63:i]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VFNMADD132PD" form="xmm {z}, xmm, xmm" xed="VFNMADD132PD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <instruction name="VFNMADD213PD" form="xmm {z}, xmm, xmm" xed="VFNMADD213PD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <instruction name="VFNMADD231PD" form="xmm {z}, xmm, xmm" xed="VFNMADD231PD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask3_fnmadd_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="__m256" varname="b" etype="FP32"/>
+ <parameter type="__m256" varname="c" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <description>Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", add the negated intermediate result to packed elements in "c", and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := -(a[i+31:i] * b[i+31:i]) + c[i+31:i]
+ ELSE
+ dst[i+31:i] := c[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VFNMADD132PS" form="ymm {k}, ymm, ymm" xed="VFNMADD132PS_YMMf32_MASKmskw_YMMf32_YMMf32_AVX512"/>
+ <instruction name="VFNMADD213PS" form="ymm {k}, ymm, ymm" xed="VFNMADD213PS_YMMf32_MASKmskw_YMMf32_YMMf32_AVX512"/>
+ <instruction name="VFNMADD231PS" form="ymm {k}, ymm, ymm" xed="VFNMADD231PS_YMMf32_MASKmskw_YMMf32_YMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_fnmadd_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256" varname="b" etype="FP32"/>
+ <parameter type="__m256" varname="c" etype="FP32"/>
+ <description>Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", add the negated intermediate result to packed elements in "c", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := -(a[i+31:i] * b[i+31:i]) + c[i+31:i]
+ ELSE
+ dst[i+31:i] := a[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VFNMADD132PS" form="ymm {k}, ymm, ymm" xed="VFNMADD132PS_YMMf32_MASKmskw_YMMf32_YMMf32_AVX512"/>
+ <instruction name="VFNMADD213PS" form="ymm {k}, ymm, ymm" xed="VFNMADD213PS_YMMf32_MASKmskw_YMMf32_YMMf32_AVX512"/>
+ <instruction name="VFNMADD231PS" form="ymm {k}, ymm, ymm" xed="VFNMADD231PS_YMMf32_MASKmskw_YMMf32_YMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_fnmadd_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="__m256" varname="b" etype="FP32"/>
+ <parameter type="__m256" varname="c" etype="FP32"/>
+ <description>Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", add the negated intermediate result to packed elements in "c", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := -(a[i+31:i] * b[i+31:i]) + c[i+31:i]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VFNMADD132PS" form="ymm {z}, ymm, ymm" xed="VFNMADD132PS_YMMf32_MASKmskw_YMMf32_YMMf32_AVX512"/>
+ <instruction name="VFNMADD213PS" form="ymm {z}, ymm, ymm" xed="VFNMADD213PS_YMMf32_MASKmskw_YMMf32_YMMf32_AVX512"/>
+ <instruction name="VFNMADD231PS" form="ymm {z}, ymm, ymm" xed="VFNMADD231PS_YMMf32_MASKmskw_YMMf32_YMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask3_fnmadd_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="__m128" varname="c" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <description>Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", add the negated intermediate result to packed elements in "c", and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := -(a[i+31:i] * b[i+31:i]) + c[i+31:i]
+ ELSE
+ dst[i+31:i] := c[i+31:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VFNMADD132PS" form="xmm {k}, xmm, xmm" xed="VFNMADD132PS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <instruction name="VFNMADD213PS" form="xmm {k}, xmm, xmm" xed="VFNMADD213PS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <instruction name="VFNMADD231PS" form="xmm {k}, xmm, xmm" xed="VFNMADD231PS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_fnmadd_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="__m128" varname="c" etype="FP32"/>
+ <description>Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", add the negated intermediate result to packed elements in "c", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := -(a[i+31:i] * b[i+31:i]) + c[i+31:i]
+ ELSE
+ dst[i+31:i] := a[i+31:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VFNMADD132PS" form="xmm {k}, xmm, xmm" xed="VFNMADD132PS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <instruction name="VFNMADD213PS" form="xmm {k}, xmm, xmm" xed="VFNMADD213PS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <instruction name="VFNMADD231PS" form="xmm {k}, xmm, xmm" xed="VFNMADD231PS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_fnmadd_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="__m128" varname="c" etype="FP32"/>
+ <description>Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", add the negated intermediate result to packed elements in "c", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := -(a[i+31:i] * b[i+31:i]) + c[i+31:i]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VFNMADD132PS" form="xmm {z}, xmm, xmm" xed="VFNMADD132PS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <instruction name="VFNMADD213PS" form="xmm {z}, xmm, xmm" xed="VFNMADD213PS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <instruction name="VFNMADD231PS" form="xmm {z}, xmm, xmm" xed="VFNMADD231PS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask3_fnmsub_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <parameter type="__m256d" varname="b" etype="FP64"/>
+ <parameter type="__m256d" varname="c" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <description>Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the negated intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := -(a[i+63:i] * b[i+63:i]) - c[i+63:i]
+ ELSE
+ dst[i+63:i] := c[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VFNMSUB132PD" form="ymm {k}, ymm, ymm" xed="VFNMSUB132PD_YMMf64_MASKmskw_YMMf64_YMMf64_AVX512"/>
+ <instruction name="VFNMSUB213PD" form="ymm {k}, ymm, ymm" xed="VFNMSUB213PD_YMMf64_MASKmskw_YMMf64_YMMf64_AVX512"/>
+ <instruction name="VFNMSUB231PD" form="ymm {k}, ymm, ymm" xed="VFNMSUB231PD_YMMf64_MASKmskw_YMMf64_YMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_fnmsub_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256d" varname="b" etype="FP64"/>
+ <parameter type="__m256d" varname="c" etype="FP64"/>
+ <description>Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the negated intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := -(a[i+63:i] * b[i+63:i]) - c[i+63:i]
+ ELSE
+ dst[i+63:i] := a[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VFNMSUB132PD" form="ymm {k}, ymm, ymm" xed="VFNMSUB132PD_YMMf64_MASKmskw_YMMf64_YMMf64_AVX512"/>
+ <instruction name="VFNMSUB213PD" form="ymm {k}, ymm, ymm" xed="VFNMSUB213PD_YMMf64_MASKmskw_YMMf64_YMMf64_AVX512"/>
+ <instruction name="VFNMSUB231PD" form="ymm {k}, ymm, ymm" xed="VFNMSUB231PD_YMMf64_MASKmskw_YMMf64_YMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_fnmsub_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <parameter type="__m256d" varname="b" etype="FP64"/>
+ <parameter type="__m256d" varname="c" etype="FP64"/>
+ <description>Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the negated intermediate result, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := -(a[i+63:i] * b[i+63:i]) - c[i+63:i]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VFNMSUB132PD" form="ymm {z}, ymm, ymm" xed="VFNMSUB132PD_YMMf64_MASKmskw_YMMf64_YMMf64_AVX512"/>
+ <instruction name="VFNMSUB213PD" form="ymm {z}, ymm, ymm" xed="VFNMSUB213PD_YMMf64_MASKmskw_YMMf64_YMMf64_AVX512"/>
+ <instruction name="VFNMSUB231PD" form="ymm {z}, ymm, ymm" xed="VFNMSUB231PD_YMMf64_MASKmskw_YMMf64_YMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask3_fnmsub_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="__m128d" varname="c" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <description>Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the negated intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := -(a[i+63:i] * b[i+63:i]) - c[i+63:i]
+ ELSE
+ dst[i+63:i] := c[i+63:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VFNMSUB132PD" form="xmm {k}, xmm, xmm" xed="VFNMSUB132PD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <instruction name="VFNMSUB213PD" form="xmm {k}, xmm, xmm" xed="VFNMSUB213PD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <instruction name="VFNMSUB231PD" form="xmm {k}, xmm, xmm" xed="VFNMSUB231PD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_fnmsub_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="__m128d" varname="c" etype="FP64"/>
+ <description>Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the negated intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := -(a[i+63:i] * b[i+63:i]) - c[i+63:i]
+ ELSE
+ dst[i+63:i] := a[i+63:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VFNMSUB132PD" form="xmm {k}, xmm, xmm" xed="VFNMSUB132PD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <instruction name="VFNMSUB213PD" form="xmm {k}, xmm, xmm" xed="VFNMSUB213PD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <instruction name="VFNMSUB231PD" form="xmm {k}, xmm, xmm" xed="VFNMSUB231PD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_fnmsub_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="__m128d" varname="c" etype="FP64"/>
+ <description>Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the negated intermediate result, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := -(a[i+63:i] * b[i+63:i]) - c[i+63:i]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VFNMSUB132PD" form="xmm {z}, xmm, xmm" xed="VFNMSUB132PD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <instruction name="VFNMSUB213PD" form="xmm {z}, xmm, xmm" xed="VFNMSUB213PD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <instruction name="VFNMSUB231PD" form="xmm {z}, xmm, xmm" xed="VFNMSUB231PD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask3_fnmsub_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="__m256" varname="b" etype="FP32"/>
+ <parameter type="__m256" varname="c" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <description>Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the negated intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := -(a[i+31:i] * b[i+31:i]) - c[i+31:i]
+ ELSE
+ dst[i+31:i] := c[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VFNMSUB132PS" form="ymm {k}, ymm, ymm" xed="VFNMSUB132PS_YMMf32_MASKmskw_YMMf32_YMMf32_AVX512"/>
+ <instruction name="VFNMSUB213PS" form="ymm {k}, ymm, ymm" xed="VFNMSUB213PS_YMMf32_MASKmskw_YMMf32_YMMf32_AVX512"/>
+ <instruction name="VFNMSUB231PS" form="ymm {k}, ymm, ymm" xed="VFNMSUB231PS_YMMf32_MASKmskw_YMMf32_YMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_fnmsub_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256" varname="b" etype="FP32"/>
+ <parameter type="__m256" varname="c" etype="FP32"/>
+ <description>Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the negated intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := -(a[i+31:i] * b[i+31:i]) - c[i+31:i]
+ ELSE
+ dst[i+31:i] := a[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VFNMSUB132PS" form="ymm {k}, ymm, ymm" xed="VFNMSUB132PS_YMMf32_MASKmskw_YMMf32_YMMf32_AVX512"/>
+ <instruction name="VFNMSUB213PS" form="ymm {k}, ymm, ymm" xed="VFNMSUB213PS_YMMf32_MASKmskw_YMMf32_YMMf32_AVX512"/>
+ <instruction name="VFNMSUB231PS" form="ymm {k}, ymm, ymm" xed="VFNMSUB231PS_YMMf32_MASKmskw_YMMf32_YMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_fnmsub_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="__m256" varname="b" etype="FP32"/>
+ <parameter type="__m256" varname="c" etype="FP32"/>
+ <description>Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the negated intermediate result, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := -(a[i+31:i] * b[i+31:i]) - c[i+31:i]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VFNMSUB132PS" form="ymm {z}, ymm, ymm" xed="VFNMSUB132PS_YMMf32_MASKmskw_YMMf32_YMMf32_AVX512"/>
+ <instruction name="VFNMSUB213PS" form="ymm {z}, ymm, ymm" xed="VFNMSUB213PS_YMMf32_MASKmskw_YMMf32_YMMf32_AVX512"/>
+ <instruction name="VFNMSUB231PS" form="ymm {z}, ymm, ymm" xed="VFNMSUB231PS_YMMf32_MASKmskw_YMMf32_YMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask3_fnmsub_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="__m128" varname="c" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <description>Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the negated intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := -(a[i+31:i] * b[i+31:i]) - c[i+31:i]
+ ELSE
+ dst[i+31:i] := c[i+31:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VFNMSUB132PS" form="xmm {k}, xmm, xmm" xed="VFNMSUB132PS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <instruction name="VFNMSUB213PS" form="xmm {k}, xmm, xmm" xed="VFNMSUB213PS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <instruction name="VFNMSUB231PS" form="xmm {k}, xmm, xmm" xed="VFNMSUB231PS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_fnmsub_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="__m128" varname="c" etype="FP32"/>
+ <description>Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the negated intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := -(a[i+31:i] * b[i+31:i]) - c[i+31:i]
+ ELSE
+ dst[i+31:i] := a[i+31:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VFNMSUB132PS" form="xmm {k}, xmm, xmm" xed="VFNMSUB132PS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <instruction name="VFNMSUB213PS" form="xmm {k}, xmm, xmm" xed="VFNMSUB213PS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <instruction name="VFNMSUB231PS" form="xmm {k}, xmm, xmm" xed="VFNMSUB231PS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_fnmsub_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="__m128" varname="c" etype="FP32"/>
+ <description>Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the negated intermediate result, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := -(a[i+31:i] * b[i+31:i]) - c[i+31:i]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VFNMSUB132PS" form="xmm {z}, xmm, xmm" xed="VFNMSUB132PS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <instruction name="VFNMSUB213PS" form="xmm {z}, xmm, xmm" xed="VFNMSUB213PS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <instruction name="VFNMSUB231PS" form="xmm {z}, xmm, xmm" xed="VFNMSUB231PS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mmask_i32gather_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Load</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="vindex" etype="SI32"/>
+ <parameter type="void const*" varname="base_addr" etype="FP64"/>
+ <parameter type="const int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <description>Gather double-precision (64-bit) floating-point elements from memory using 32-bit indices. 64-bit elements are loaded from addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8.</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ m := j*32
+ IF k[j]
+ addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
+ dst[i+63:i] := MEM[addr+63:addr]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VGATHERDPD" form="ymm {k}, vm32x" xed="VGATHERDPD_YMMf64_MASKmskw_MEMf64_AVX512_VL256"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mmask_i32gather_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Load</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="vindex" etype="SI32"/>
+ <parameter type="void const*" varname="base_addr" etype="FP64"/>
+ <parameter type="const int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <description>Gather double-precision (64-bit) floating-point elements from memory using 32-bit indices. 64-bit elements are loaded from addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8.</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ m := j*32
+ IF k[j]
+ addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
+ dst[i+63:i] := MEM[addr+63:addr]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VGATHERDPD" form="xmm {k}, vm32x" xed="VGATHERDPD_XMMf64_MASKmskw_MEMf64_AVX512_VL128"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mmask_i32gather_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Load</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="vindex" etype="SI32"/>
+ <parameter type="void const*" varname="base_addr" etype="FP32"/>
+ <parameter type="const int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <description>Gather single-precision (32-bit) floating-point elements from memory using 32-bit indices. 32-bit elements are loaded from addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ m := j*32
+ IF k[j]
+ addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
+ dst[i+31:i] := MEM[addr+31:addr]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VGATHERDPS" form="ymm {k}, vm32y" xed="VGATHERDPS_YMMf32_MASKmskw_MEMf32_AVX512_VL256"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mmask_i32gather_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Load</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="vindex" etype="SI32"/>
+ <parameter type="void const*" varname="base_addr" etype="FP32"/>
+ <parameter type="const int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <description>Gather single-precision (32-bit) floating-point elements from memory using 32-bit indices. 32-bit elements are loaded from addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8.</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ m := j*32
+ IF k[j]
+ addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
+ dst[i+31:i] := MEM[addr+31:addr]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VGATHERDPS" form="xmm {k}, vm32x" xed="VGATHERDPS_XMMf32_MASKmskw_MEMf32_AVX512_VL128"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mmask_i64gather_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Load</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="vindex" etype="SI64"/>
+ <parameter type="void const*" varname="base_addr" etype="FP64"/>
+ <parameter type="const int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <description>Gather double-precision (64-bit) floating-point elements from memory using 64-bit indices. 64-bit elements are loaded from addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8.</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ m := j*64
+ IF k[j]
+ addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8
+ dst[i+63:i] := MEM[addr+63:addr]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VGATHERQPD" form="ymm {k}, vm64y" xed="VGATHERQPD_YMMf64_MASKmskw_MEMf64_AVX512_VL256"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mmask_i64gather_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Load</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="vindex" etype="SI64"/>
+ <parameter type="void const*" varname="base_addr" etype="FP64"/>
+ <parameter type="const int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <description>Gather double-precision (64-bit) floating-point elements from memory using 64-bit indices. 64-bit elements are loaded from addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8.</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ m := j*64
+ IF k[j]
+ addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8
+ dst[i+63:i] := MEM[addr+63:addr]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VGATHERQPD" form="xmm {k}, vm64x" xed="VGATHERQPD_XMMf64_MASKmskw_MEMf64_AVX512_VL128"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mmask_i64gather_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Load</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="vindex" etype="SI64"/>
+ <parameter type="void const*" varname="base_addr" etype="FP32"/>
+ <parameter type="const int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <description>Gather single-precision (32-bit) floating-point elements from memory using 64-bit indices. 32-bit elements are loaded from addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8.</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ m := j*64
+ IF k[j]
+ addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8
+ dst[i+31:i] := MEM[addr+31:addr]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VGATHERQPS" form="ymm {k}, vm64y" xed="VGATHERQPS_YMMf32_MASKmskw_MEMf32_AVX512_VL512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mmask_i64gather_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Load</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="vindex" etype="SI64"/>
+ <parameter type="void const*" varname="base_addr" etype="FP32"/>
+ <parameter type="const int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <description>Gather single-precision (32-bit) floating-point elements from memory using 64-bit indices. 32-bit elements are loaded from addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8.</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*32
+ m := j*64
+ IF k[j]
+ addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8
+ dst[i+31:i] := MEM[addr+31:addr]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:64] := 0
+ </operation>
+ <instruction name="VGATHERQPS" form="xmm {k}, vm64x" xed="VGATHERQPS_XMMf32_MASKmskw_MEMf32_AVX512_VL128"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_getexp_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <description>Convert the exponent of each packed double-precision (64-bit) floating-point element in "a" to a double-precision (64-bit) floating-point number representing the integer exponent, and store the results in "dst". This intrinsic essentially calculates "floor(log2(x))" for each element.</description>
+ <operation>FOR j := 0 to 3
+ i := j*64
+ dst[i+63:i] := ConvertExpFP64(a[i+63:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VGETEXPPD" form="ymm, ymm" xed="VGETEXPPD_YMMf64_MASKmskw_YMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_getexp_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <description>Convert the exponent of each packed double-precision (64-bit) floating-point element in "a" to a double-precision (64-bit) floating-point number representing the integer exponent, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). This intrinsic essentially calculates "floor(log2(x))" for each element.</description>
+ <operation>FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := ConvertExpFP64(a[i+63:i])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VGETEXPPD" form="ymm {k}, ymm" xed="VGETEXPPD_YMMf64_MASKmskw_YMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_getexp_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <description>Convert the exponent of each packed double-precision (64-bit) floating-point element in "a" to a double-precision (64-bit) floating-point number representing the integer exponent, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). This intrinsic essentially calculates "floor(log2(x))" for each element.</description>
+ <operation>FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := ConvertExpFP64(a[i+63:i])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VGETEXPPD" form="ymm {z}, ymm" xed="VGETEXPPD_YMMf64_MASKmskw_YMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_getexp_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Convert the exponent of each packed double-precision (64-bit) floating-point element in "a" to a double-precision (64-bit) floating-point number representing the integer exponent, and store the results in "dst". This intrinsic essentially calculates "floor(log2(x))" for each element.</description>
+ <operation>FOR j := 0 to 1
+ i := j*64
+ dst[i+63:i] := ConvertExpFP64(a[i+63:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VGETEXPPD" form="xmm, xmm" xed="VGETEXPPD_XMMf64_MASKmskw_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_getexp_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Convert the exponent of each packed double-precision (64-bit) floating-point element in "a" to a double-precision (64-bit) floating-point number representing the integer exponent, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). This intrinsic essentially calculates "floor(log2(x))" for each element.</description>
+ <operation>FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := ConvertExpFP64(a[i+63:i])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VGETEXPPD" form="xmm {k}, xmm" xed="VGETEXPPD_XMMf64_MASKmskw_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_getexp_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Convert the exponent of each packed double-precision (64-bit) floating-point element in "a" to a double-precision (64-bit) floating-point number representing the integer exponent, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). This intrinsic essentially calculates "floor(log2(x))" for each element.</description>
+ <operation>FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := ConvertExpFP64(a[i+63:i])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VGETEXPPD" form="xmm {z}, xmm" xed="VGETEXPPD_XMMf64_MASKmskw_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_getexp_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <description>Convert the exponent of each packed single-precision (32-bit) floating-point element in "a" to a single-precision (32-bit) floating-point number representing the integer exponent, and store the results in "dst". This intrinsic essentially calculates "floor(log2(x))" for each element.</description>
+ <operation>FOR j := 0 to 7
+ i := j*32
+ dst[i+31:i] := ConvertExpFP32(a[i+31:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VGETEXPPS" form="ymm, ymm" xed="VGETEXPPS_YMMf32_MASKmskw_YMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_getexp_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <description>Convert the exponent of each packed single-precision (32-bit) floating-point element in "a" to a single-precision (32-bit) floating-point number representing the integer exponent, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). This intrinsic essentially calculates "floor(log2(x))" for each element.</description>
+ <operation>FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := ConvertExpFP32(a[i+31:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VGETEXPPS" form="ymm {k}, ymm" xed="VGETEXPPS_YMMf32_MASKmskw_YMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_getexp_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <description>Convert the exponent of each packed single-precision (32-bit) floating-point element in "a" to a single-precision (32-bit) floating-point number representing the integer exponent, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). This intrinsic essentially calculates "floor(log2(x))" for each element.</description>
+ <operation>FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := ConvertExpFP32(a[i+31:i])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VGETEXPPS" form="ymm {z}, ymm" xed="VGETEXPPS_YMMf32_MASKmskw_YMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_getexp_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Convert the exponent of each packed single-precision (32-bit) floating-point element in "a" to a single-precision (32-bit) floating-point number representing the integer exponent, and store the results in "dst". This intrinsic essentially calculates "floor(log2(x))" for each element.</description>
+ <operation>FOR j := 0 to 3
+ i := j*32
+ dst[i+31:i] := ConvertExpFP32(a[i+31:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VGETEXPPS" form="xmm, xmm" xed="VGETEXPPS_XMMf32_MASKmskw_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_getexp_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Convert the exponent of each packed single-precision (32-bit) floating-point element in "a" to a single-precision (32-bit) floating-point number representing the integer exponent, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). This intrinsic essentially calculates "floor(log2(x))" for each element.</description>
+ <operation>FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := ConvertExpFP32(a[i+31:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VGETEXPPS" form="xmm {k}, xmm" xed="VGETEXPPS_XMMf32_MASKmskw_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_getexp_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Convert the exponent of each packed single-precision (32-bit) floating-point element in "a" to a single-precision (32-bit) floating-point number representing the integer exponent, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). This intrinsic essentially calculates "floor(log2(x))" for each element.</description>
+ <operation>FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := ConvertExpFP32(a[i+31:i])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VGETEXPPS" form="xmm {z}, xmm" xed="VGETEXPPS_XMMf32_MASKmskw_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_getmant_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <parameter type="_MM_MANTISSA_NORM_ENUM" varname="interv" etype="IMM" immtype="_MM_MANTISSA_NORM"/>
+ <parameter type="_MM_MANTISSA_SIGN_ENUM" varname="sc" etype="IMM" immtype="_MM_MANTISSA_SIGN"/>
+ <description>Normalize the mantissas of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst". This intrinsic essentially calculates "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "interv" and the sign depends on "sc" and the source sign.
+ [getmant_note]</description>
+ <operation>FOR j := 0 to 3
+ i := j*64
+ dst[i+63:i] := GetNormalizedMantissa(a[i+63:i], sc, interv)
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VGETMANTPD" form="ymm, ymm, imm8" xed="VGETMANTPD_YMMf64_MASKmskw_YMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_getmant_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <parameter type="_MM_MANTISSA_NORM_ENUM" varname="interv" etype="IMM" immtype="_MM_MANTISSA_NORM"/>
+ <parameter type="_MM_MANTISSA_SIGN_ENUM" varname="sc" etype="IMM" immtype="_MM_MANTISSA_SIGN"/>
+ <description>Normalize the mantissas of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). This intrinsic essentially calculates "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "interv" and the sign depends on "sc" and the source sign.
+ [getmant_note]</description>
+ <operation>FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := GetNormalizedMantissa(a[i+63:i], sc, interv)
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VGETMANTPD" form="ymm {k}, ymm, imm8" xed="VGETMANTPD_YMMf64_MASKmskw_YMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_getmant_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <parameter type="_MM_MANTISSA_NORM_ENUM" varname="interv" etype="IMM" immtype="_MM_MANTISSA_NORM"/>
+ <parameter type="_MM_MANTISSA_SIGN_ENUM" varname="sc" etype="IMM" immtype="_MM_MANTISSA_SIGN"/>
+ <description>Normalize the mantissas of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). This intrinsic essentially calculates "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "interv" and the sign depends on "sc" and the source sign.
+ [getmant_note]</description>
+ <operation>FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := GetNormalizedMantissa(a[i+63:i], sc, interv)
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VGETMANTPD" form="ymm {z}, ymm, imm8" xed="VGETMANTPD_YMMf64_MASKmskw_YMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_getmant_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="_MM_MANTISSA_NORM_ENUM" varname="interv" etype="IMM" immtype="_MM_MANTISSA_NORM"/>
+ <parameter type="_MM_MANTISSA_SIGN_ENUM" varname="sc" etype="IMM" immtype="_MM_MANTISSA_SIGN"/>
+ <description>Normalize the mantissas of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst". This intrinsic essentially calculates "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "interv" and the sign depends on "sc" and the source sign.
+ [getmant_note]</description>
+ <operation>FOR j := 0 to 1
+ i := j*64
+ dst[i+63:i] := GetNormalizedMantissa(a[i+63:i], sc, interv)
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VGETMANTPD" form="xmm, xmm, imm8" xed="VGETMANTPD_XMMf64_MASKmskw_XMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_getmant_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="_MM_MANTISSA_NORM_ENUM" varname="interv" etype="IMM" immtype="_MM_MANTISSA_NORM"/>
+ <parameter type="_MM_MANTISSA_SIGN_ENUM" varname="sc" etype="IMM" immtype="_MM_MANTISSA_SIGN"/>
+ <description>Normalize the mantissas of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). This intrinsic essentially calculates "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "interv" and the sign depends on "sc" and the source sign.
+ [getmant_note]</description>
+ <operation>FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := GetNormalizedMantissa(a[i+63:i], sc, interv)
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VGETMANTPD" form="xmm {k}, xmm, imm8" xed="VGETMANTPD_XMMf64_MASKmskw_XMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_getmant_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="_MM_MANTISSA_NORM_ENUM" varname="interv" etype="IMM" immtype="_MM_MANTISSA_NORM"/>
+ <parameter type="_MM_MANTISSA_SIGN_ENUM" varname="sc" etype="IMM" immtype="_MM_MANTISSA_SIGN"/>
+ <description>Normalize the mantissas of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). This intrinsic essentially calculates "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "interv" and the sign depends on "sc" and the source sign.
+ [getmant_note]</description>
+ <operation>FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := GetNormalizedMantissa(a[i+63:i], sc, interv)
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VGETMANTPD" form="xmm {z}, xmm, imm8" xed="VGETMANTPD_XMMf64_MASKmskw_XMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_getmant_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="_MM_MANTISSA_NORM_ENUM" varname="interv" etype="IMM" immtype="_MM_MANTISSA_NORM"/>
+ <parameter type="_MM_MANTISSA_SIGN_ENUM" varname="sc" etype="IMM" immtype="_MM_MANTISSA_SIGN"/>
+ <description>Normalize the mantissas of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". This intrinsic essentially calculates "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "interv" and the sign depends on "sc" and the source sign.
+ [getmant_note]</description>
+ <operation>FOR j := 0 to 7
+ i := j*32
+ dst[i+31:i] := GetNormalizedMantissa(a[i+31:i], sc, interv)
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VGETMANTPS" form="ymm, ymm, imm8" xed="VGETMANTPS_YMMf32_MASKmskw_YMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_getmant_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="_MM_MANTISSA_NORM_ENUM" varname="interv" etype="IMM" immtype="_MM_MANTISSA_NORM"/>
+ <parameter type="_MM_MANTISSA_SIGN_ENUM" varname="sc" etype="IMM" immtype="_MM_MANTISSA_SIGN"/>
+ <description>Normalize the mantissas of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). This intrinsic essentially calculates "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "interv" and the sign depends on "sc" and the source sign.
+ [getmant_note]</description>
+ <operation>FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := GetNormalizedMantissa(a[i+31:i], sc, interv)
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VGETMANTPS" form="ymm {k}, ymm, imm8" xed="VGETMANTPS_YMMf32_MASKmskw_YMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_getmant_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="_MM_MANTISSA_NORM_ENUM" varname="interv" etype="IMM" immtype="_MM_MANTISSA_NORM"/>
+ <parameter type="_MM_MANTISSA_SIGN_ENUM" varname="sc" etype="IMM" immtype="_MM_MANTISSA_SIGN"/>
+ <description>Normalize the mantissas of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). This intrinsic essentially calculates "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "interv" and the sign depends on "sc" and the source sign.
+ [getmant_note]</description>
+ <operation>FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := GetNormalizedMantissa(a[i+31:i], sc, interv)
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VGETMANTPS" form="ymm {z}, ymm, imm8" xed="VGETMANTPS_YMMf32_MASKmskw_YMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_getmant_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="_MM_MANTISSA_NORM_ENUM" varname="interv" etype="IMM" immtype="_MM_MANTISSA_NORM"/>
+ <parameter type="_MM_MANTISSA_SIGN_ENUM" varname="sc" etype="IMM" immtype="_MM_MANTISSA_SIGN"/>
+ <description>Normalize the mantissas of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". This intrinsic essentially calculates "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "interv" and the sign depends on "sc" and the source sign.
+ [getmant_note]</description>
+ <operation>FOR j := 0 to 3
+ i := j*32
+ dst[i+31:i] := GetNormalizedMantissa(a[i+31:i], sc, interv)
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VGETMANTPS" form="xmm, xmm, imm8" xed="VGETMANTPS_XMMf32_MASKmskw_XMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_getmant_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="_MM_MANTISSA_NORM_ENUM" varname="interv" etype="IMM" immtype="_MM_MANTISSA_NORM"/>
+ <parameter type="_MM_MANTISSA_SIGN_ENUM" varname="sc" etype="IMM" immtype="_MM_MANTISSA_SIGN"/>
+ <description>Normalize the mantissas of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). This intrinsic essentially calculates "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "interv" and the sign depends on "sc" and the source sign.
+ [getmant_note]</description>
+ <operation>FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := GetNormalizedMantissa(a[i+31:i], sc, interv)
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VGETMANTPS" form="xmm {k}, xmm, imm8" xed="VGETMANTPS_XMMf32_MASKmskw_XMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_getmant_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="_MM_MANTISSA_NORM_ENUM" varname="interv" etype="IMM" immtype="_MM_MANTISSA_NORM"/>
+ <parameter type="_MM_MANTISSA_SIGN_ENUM" varname="sc" etype="IMM" immtype="_MM_MANTISSA_SIGN"/>
+ <description>Normalize the mantissas of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). This intrinsic essentially calculates "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "interv" and the sign depends on "sc" and the source sign.
+ [getmant_note]</description>
+ <operation>FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := GetNormalizedMantissa(a[i+31:i], sc, interv)
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VGETMANTPS" form="xmm {z}, xmm, imm8" xed="VGETMANTPS_XMMf32_MASKmskw_XMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_insertf32x4">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="1"/>
+ <description>Copy "a" to "dst", then insert 128 bits (composed of 4 packed single-precision (32-bit) floating-point elements) from "b" into "dst" at the location specified by "imm8".</description>
+ <operation>
+dst[255:0] := a[255:0]
+CASE (imm8[0]) OF
+0: dst[127:0] := b[127:0]
+1: dst[255:128] := b[127:0]
+ESAC
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VINSERTF32X4" form="ymm, ymm, xmm, imm8" xed="VINSERTF32X4_YMMf32_MASKmskw_YMMf32_XMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_insertf32x4">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="1"/>
+ <description>Copy "a" to "tmp", then insert 128 bits (composed of 4 packed single-precision (32-bit) floating-point elements) from "b" into "tmp" at the location specified by "imm8". Store "tmp" to "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+tmp[255:0] := a[255:0]
+CASE (imm8[0]) OF
+0: tmp[127:0] := b[127:0]
+1: tmp[255:128] := b[127:0]
+ESAC
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := tmp[i+31:i]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VINSERTF32X4" form="ymm {k}, ymm, xmm, imm8" xed="VINSERTF32X4_YMMf32_MASKmskw_YMMf32_XMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_insertf32x4">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="1"/>
+ <description>Copy "a" to "tmp", then insert 128 bits (composed of 4 packed single-precision (32-bit) floating-point elements) from "b" into "tmp" at the location specified by "imm8". Store "tmp" to "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+tmp[255:0] := a[255:0]
+CASE (imm8[0]) OF
+0: tmp[127:0] := b[127:0]
+1: tmp[255:128] := b[127:0]
+ESAC
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := tmp[i+31:i]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VINSERTF32X4" form="ymm {z}, ymm, xmm, imm8" xed="VINSERTF32X4_YMMf32_MASKmskw_YMMf32_XMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_inserti32x4">
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="b" etype="UI32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="1"/>
+ <description>Copy "a" to "dst", then insert 128 bits (composed of 4 packed 32-bit integers) from "b" into "dst" at the location specified by "imm8".</description>
+ <operation>
+dst[255:0] := a[255:0]
+CASE (imm8[0]) OF
+0: dst[127:0] := b[127:0]
+1: dst[255:128] := b[127:0]
+ESAC
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VINSERTI32X4" form="ymm, ymm, xmm, imm8" xed="VINSERTI32X4_YMMu32_MASKmskw_YMMu32_XMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_inserti32x4">
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="b" etype="UI32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="1"/>
+ <description>Copy "a" to "tmp", then insert 128 bits (composed of 4 packed 32-bit integers) from "b" into "tmp" at the location specified by "imm8". Store "tmp" to "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+tmp[255:0] := a[255:0]
+CASE (imm8[0]) OF
+0: tmp[127:0] := b[127:0]
+1: tmp[255:128] := b[127:0]
+ESAC
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := tmp[i+31:i]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VINSERTI32X4" form="ymm {k}, ymm, xmm, imm8" xed="VINSERTI32X4_YMMu32_MASKmskw_YMMu32_XMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_inserti32x4">
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="b" etype="UI32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="1"/>
+ <description>Copy "a" to "tmp", then insert 128 bits (composed of 4 packed 32-bit integers) from "b" into "tmp" at the location specified by "imm8". Store "tmp" to "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+tmp[255:0] := a[255:0]
+CASE (imm8[0]) OF
+0: tmp[127:0] := b[127:0]
+1: tmp[255:128] := b[127:0]
+ESAC
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := tmp[i+31:i]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VINSERTI32X4" form="ymm {z}, ymm, xmm, imm8" xed="VINSERTI32X4_YMMu32_MASKmskw_YMMu32_XMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_max_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <parameter type="__m256d" varname="b" etype="FP64"/>
+ <description>Compare packed double-precision (64-bit) floating-point elements in "a" and "b", and store packed maximum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := MAX(a[i+63:i], b[i+63:i])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VMAXPD" form="ymm {k}, ymm, ymm" xed="VMAXPD_YMMf64_MASKmskw_YMMf64_YMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_max_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <parameter type="__m256d" varname="b" etype="FP64"/>
+ <description>Compare packed double-precision (64-bit) floating-point elements in "a" and "b", and store packed maximum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := MAX(a[i+63:i], b[i+63:i])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VMAXPD" form="ymm {z}, ymm, ymm" xed="VMAXPD_YMMf64_MASKmskw_YMMf64_YMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_max_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Compare packed double-precision (64-bit) floating-point elements in "a" and "b", and store packed maximum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := MAX(a[i+63:i], b[i+63:i])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VMAXPD" form="xmm {k}, xmm, xmm" xed="VMAXPD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_max_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Compare packed double-precision (64-bit) floating-point elements in "a" and "b", and store packed maximum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := MAX(a[i+63:i], b[i+63:i])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VMAXPD" form="xmm {z}, xmm, xmm" xed="VMAXPD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_max_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="__m256" varname="b" etype="FP32"/>
+ <description>Compare packed single-precision (32-bit) floating-point elements in "a" and "b", and store packed maximum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := MAX(a[i+31:i], b[i+31:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VMAXPS" form="ymm {k}, ymm, ymm" xed="VMAXPS_YMMf32_MASKmskw_YMMf32_YMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_max_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="__m256" varname="b" etype="FP32"/>
+ <description>Compare packed single-precision (32-bit) floating-point elements in "a" and "b", and store packed maximum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := MAX(a[i+31:i], b[i+31:i])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VMAXPS" form="ymm {z}, ymm, ymm" xed="VMAXPS_YMMf32_MASKmskw_YMMf32_YMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_max_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Compare packed single-precision (32-bit) floating-point elements in "a" and "b", and store packed maximum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := MAX(a[i+31:i], b[i+31:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VMAXPS" form="xmm {k}, xmm, xmm" xed="VMAXPS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_max_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Compare packed single-precision (32-bit) floating-point elements in "a" and "b", and store packed maximum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := MAX(a[i+31:i], b[i+31:i])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VMAXPS" form="xmm {z}, xmm, xmm" xed="VMAXPS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_min_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <parameter type="__m256d" varname="b" etype="FP64"/>
+ <description>Compare packed double-precision (64-bit) floating-point elements in "a" and "b", and store packed minimum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := MIN(a[i+63:i], b[i+63:i])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VMINPD" form="ymm {k}, ymm, ymm" xed="VMINPD_YMMf64_MASKmskw_YMMf64_YMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_min_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <parameter type="__m256d" varname="b" etype="FP64"/>
+ <description>Compare packed double-precision (64-bit) floating-point elements in "a" and "b", and store packed minimum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := MIN(a[i+63:i], b[i+63:i])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VMINPD" form="ymm {z}, ymm, ymm" xed="VMINPD_YMMf64_MASKmskw_YMMf64_YMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_min_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Compare packed double-precision (64-bit) floating-point elements in "a" and "b", and store packed minimum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := MIN(a[i+63:i], b[i+63:i])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VMINPD" form="xmm {k}, xmm, xmm" xed="VMINPD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_min_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Compare packed double-precision (64-bit) floating-point elements in "a" and "b", and store packed minimum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := MIN(a[i+63:i], b[i+63:i])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VMINPD" form="xmm {z}, xmm, xmm" xed="VMINPD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_min_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="__m256" varname="b" etype="FP32"/>
+ <description>Compare packed single-precision (32-bit) floating-point elements in "a" and "b", and store packed minimum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := MIN(a[i+31:i], b[i+31:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VMINPS" form="ymm {k}, ymm, ymm" xed="VMINPS_YMMf32_MASKmskw_YMMf32_YMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_min_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="__m256" varname="b" etype="FP32"/>
+ <description>Compare packed single-precision (32-bit) floating-point elements in "a" and "b", and store packed minimum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := MIN(a[i+31:i], b[i+31:i])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VMINPS" form="ymm {z}, ymm, ymm" xed="VMINPS_YMMf32_MASKmskw_YMMf32_YMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_min_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Compare packed single-precision (32-bit) floating-point elements in "a" and "b", and store packed minimum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := MIN(a[i+31:i], b[i+31:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VMINPS" form="xmm {k}, xmm, xmm" xed="VMINPS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_min_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Compare packed single-precision (32-bit) floating-point elements in "a" and "b", and store packed minimum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := MIN(a[i+31:i], b[i+31:i])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VMINPS" form="xmm {z}, xmm, xmm" xed="VMINPS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_load_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Load</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="void const*" varname="mem_addr" etype="FP64" memwidth="256"/>
+ <description>Load packed double-precision (64-bit) floating-point elements from memory into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). "mem_addr" must be aligned on a 32-byte boundary or a general-protection exception may be generated.</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := MEM[mem_addr+i+63:mem_addr+i]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VMOVAPD" form="ymm {k}, m64" xed="VMOVAPD_YMMf64_MASKmskw_MEMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_mov_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Move</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <description>Move packed double-precision (64-bit) floating-point elements from "a" to "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := a[i+63:i]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VMOVAPD" form="ymm {k}, ymm" xed="VMOVAPD_YMMf64_MASKmskw_YMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_store_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="mem_addr" etype="FP64" memwidth="256"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <description>Store packed double-precision (64-bit) floating-point elements from "a" into memory using writemask "k".
+ "mem_addr" must be aligned on a 32-byte boundary or a general-protection exception may be generated.</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ MEM[mem_addr+i+63:mem_addr+i] := a[i+63:i]
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VMOVAPD" form="m256 {k}, ymm" xed="VMOVAPD_MEMf64_MASKmskw_YMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_load_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Load</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="void const*" varname="mem_addr" etype="FP64" memwidth="256"/>
+ <description>Load packed double-precision (64-bit) floating-point elements from memory into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). "mem_addr" must be aligned on a 32-byte boundary or a general-protection exception may be generated.</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := MEM[mem_addr+i+63:mem_addr+i]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VMOVAPD" form="ymm {z}, m256" xed="VMOVAPD_YMMf64_MASKmskw_MEMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_mov_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Move</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <description>Move packed double-precision (64-bit) floating-point elements from "a" into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := a[i+63:i]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VMOVAPD" form="ymm {z}, ymm" xed="VMOVAPD_YMMf64_MASKmskw_YMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_load_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Load</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="void const*" varname="mem_addr" etype="FP64" memwidth="128"/>
+ <description>Load packed double-precision (64-bit) floating-point elements from memory into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). "mem_addr" must be aligned on a 16-byte boundary or a general-protection exception may be generated.</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := MEM[mem_addr+i+63:mem_addr+i]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VMOVAPD" form="xmm {k}, m128" xed="VMOVAPD_XMMf64_MASKmskw_MEMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_mov_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Move</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Move packed double-precision (64-bit) floating-point elements from "a" to "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := a[i+63:i]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VMOVAPD" form="xmm {k}, xmm" xed="VMOVAPD_XMMf64_MASKmskw_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_store_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="mem_addr" etype="FP64" memwidth="128"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Store packed double-precision (64-bit) floating-point elements from "a" into memory using writemask "k".
+ "mem_addr" must be aligned on a 16-byte boundary or a general-protection exception may be generated.</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ MEM[mem_addr+i+63:mem_addr+i] := a[i+63:i]
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VMOVAPD" form="m128 {k}, xmm" xed="VMOVAPD_MEMf64_MASKmskw_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_load_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Load</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="void const*" varname="mem_addr" etype="FP64" memwidth="128"/>
+ <description>Load packed double-precision (64-bit) floating-point elements from memory into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). "mem_addr" must be aligned on a 16-byte boundary or a general-protection exception may be generated.</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := MEM[mem_addr+i+63:mem_addr+i]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VMOVAPD" form="xmm {z}, m128" xed="VMOVAPD_XMMf64_MASKmskw_MEMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_mov_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Move</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Move packed double-precision (64-bit) floating-point elements from "a" into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := a[i+63:i]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VMOVAPD" form="xmm {z}, xmm" xed="VMOVAPD_XMMf64_MASKmskw_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_load_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Load</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="void const*" varname="mem_addr" etype="FP32" memwidth="256"/>
+ <description>Load packed single-precision (32-bit) floating-point elements from memory into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). "mem_addr" must be aligned on a 32-byte boundary or a general-protection exception may be generated.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := MEM[mem_addr+i+31:mem_addr+i]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VMOVAPS" form="ymm {k}, m256" xed="VMOVAPS_YMMf32_MASKmskw_MEMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_mov_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Move</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <description>Move packed single-precision (32-bit) floating-point elements from "a" to "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := a[i+31:i]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VMOVAPS" form="ymm {k}, ymm" xed="VMOVAPS_YMMf32_MASKmskw_YMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_store_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="mem_addr" etype="FP32" memwidth="256"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <description>Store packed single-precision (32-bit) floating-point elements from "a" into memory using writemask "k".
+ "mem_addr" must be aligned on a 32-byte boundary or a general-protection exception may be generated.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ MEM[mem_addr+i+31:mem_addr+i] := a[i+31:i]
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VMOVAPS" form="m256 {k}, ymm" xed="VMOVAPS_MEMf32_MASKmskw_YMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_load_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Load</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="void const*" varname="mem_addr" etype="FP32" memwidth="256"/>
+ <description>Load packed single-precision (32-bit) floating-point elements from memory into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). "mem_addr" must be aligned on a 32-byte boundary or a general-protection exception may be generated.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := MEM[mem_addr+i+31:mem_addr+i]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VMOVAPS" form="ymm {z}, m256" xed="VMOVAPS_YMMf32_MASKmskw_MEMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_mov_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Move</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <description>Move packed single-precision (32-bit) floating-point elements from "a" into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := a[i+31:i]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VMOVAPS" form="ymm {z}, ymm" xed="VMOVAPS_YMMf32_MASKmskw_YMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_load_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Load</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="void const*" varname="mem_addr" etype="FP32" memwidth="128"/>
+ <description>Load packed single-precision (32-bit) floating-point elements from memory into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). "mem_addr" must be aligned on a 16-byte boundary or a general-protection exception may be generated.</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := MEM[mem_addr+i+31:mem_addr+i]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VMOVAPS" form="xmm {k}, m128" xed="VMOVAPS_XMMf32_MASKmskw_MEMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_mov_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Move</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Move packed single-precision (32-bit) floating-point elements from "a" to "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := a[i+31:i]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VMOVAPS" form="xmm {k}, xmm" xed="VMOVAPS_XMMf32_MASKmskw_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_store_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="mem_addr" etype="FP32" memwidth="128"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Store packed single-precision (32-bit) floating-point elements from "a" into memory using writemask "k".
+ "mem_addr" must be aligned on a 16-byte boundary or a general-protection exception may be generated.</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ MEM[mem_addr+i+31:mem_addr+i] := a[i+31:i]
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VMOVAPS" form="m128 {k}, xmm" xed="VMOVAPS_MEMf32_MASKmskw_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_load_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Load</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="void const*" varname="mem_addr" etype="FP32" memwidth="128"/>
+ <description>Load packed single-precision (32-bit) floating-point elements from memory into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). "mem_addr" must be aligned on a 16-byte boundary or a general-protection exception may be generated.</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := MEM[mem_addr+i+31:mem_addr+i]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VMOVAPS" form="xmm {z}, m128" xed="VMOVAPS_XMMf32_MASKmskw_MEMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_mov_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Move</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Move packed single-precision (32-bit) floating-point elements from "a" into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := a[i+31:i]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VMOVAPS" form="xmm {z}, xmm" xed="VMOVAPS_XMMf32_MASKmskw_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_movedup_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Move</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <description>Duplicate even-indexed double-precision (64-bit) floating-point elements from "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+tmp[63:0] := a[63:0]
+tmp[127:64] := a[63:0]
+tmp[191:128] := a[191:128]
+tmp[255:192] := a[191:128]
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := tmp[i+63:i]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VMOVDDUP" form="ymm {k}, ymm" xed="VMOVDDUP_YMMf64_MASKmskw_YMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_movedup_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Move</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <description>Duplicate even-indexed double-precision (64-bit) floating-point elements from "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+tmp[63:0] := a[63:0]
+tmp[127:64] := a[63:0]
+tmp[191:128] := a[191:128]
+tmp[255:192] := a[191:128]
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := tmp[i+63:i]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VMOVDDUP" form="ymm {z}, ymm" xed="VMOVDDUP_YMMf64_MASKmskw_YMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_movedup_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Move</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Duplicate even-indexed double-precision (64-bit) floating-point elements from "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+tmp[63:0] := a[63:0]
+tmp[127:64] := a[63:0]
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := tmp[i+63:i]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VMOVDDUP" form="xmm {k}, xmm" xed="VMOVDDUP_XMMf64_MASKmskw_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_movedup_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Move</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Duplicate even-indexed double-precision (64-bit) floating-point elements from "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+tmp[63:0] := a[63:0]
+tmp[127:64] := a[63:0]
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := tmp[i+63:i]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VMOVDDUP" form="xmm {z}, xmm" xed="VMOVDDUP_XMMf64_MASKmskw_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_load_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Load</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="void const*" varname="mem_addr" etype="UI32" memwidth="256"/>
+ <description>Load packed 32-bit integers from memory into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+ "mem_addr" must be aligned on a 32-byte boundary or a general-protection exception may be generated.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := MEM[mem_addr+i+31:mem_addr+i]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VMOVDQA32" form="ymm {k}, m64" xed="VMOVDQA32_YMMu32_MASKmskw_MEMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_mov_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Move</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <description>Move packed 32-bit integers from "a" to "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := a[i+31:i]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VMOVDQA32" form="ymm {k}, ymm" xed="VMOVDQA32_YMMu32_MASKmskw_YMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_store_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="mem_addr" etype="UI32" memwidth="256"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <description>Store packed 32-bit integers from "a" into memory using writemask "k".
+ "mem_addr" must be aligned on a 32-byte boundary or a general-protection exception may be generated.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ MEM[mem_addr+i+31:mem_addr+i] := a[i+31:i]
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VMOVDQA32" form="m256 {k}, ymm" xed="VMOVDQA32_MEMu32_MASKmskw_YMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_load_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Load</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="void const*" varname="mem_addr" etype="UI32" memwidth="256"/>
+ <description>Load packed 32-bit integers from memory into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+ "mem_addr" must be aligned on a 32-byte boundary or a general-protection exception may be generated.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := MEM[mem_addr+i+31:mem_addr+i]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VMOVDQA32" form="ymm {z}, m64" xed="VMOVDQA32_YMMu32_MASKmskw_MEMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_mov_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Move</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <description>Move packed 32-bit integers from "a" into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := a[i+31:i]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VMOVDQA32" form="ymm {z}, ymm" xed="VMOVDQA32_YMMu32_MASKmskw_YMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_load_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Load</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="void const*" varname="mem_addr" etype="UI32" memwidth="128"/>
+ <description>Load packed 32-bit integers from memory into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+ "mem_addr" must be aligned on a 16-byte boundary or a general-protection exception may be generated.</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := MEM[mem_addr+i+31:mem_addr+i]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VMOVDQA32" form="xmm {k}, m64" xed="VMOVDQA32_XMMu32_MASKmskw_MEMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_mov_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Move</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <description>Move packed 32-bit integers from "a" to "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := a[i+31:i]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VMOVDQA32" form="xmm {k}, xmm" xed="VMOVDQA32_XMMu32_MASKmskw_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_store_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="mem_addr" etype="UI32" memwidth="128"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <description>Store packed 32-bit integers from "a" into memory using writemask "k".
+ "mem_addr" must be aligned on a 16-byte boundary or a general-protection exception may be generated.</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ MEM[mem_addr+i+31:mem_addr+i] := a[i+31:i]
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VMOVDQA32" form="m128 {k}, xmm" xed="VMOVDQA32_MEMu32_MASKmskw_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_load_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Load</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="void const*" varname="mem_addr" etype="UI32" memwidth="128"/>
+ <description>Load packed 32-bit integers from memory into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+ "mem_addr" must be aligned on a 16-byte boundary or a general-protection exception may be generated.</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := MEM[mem_addr+i+31:mem_addr+i]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VMOVDQA32" form="xmm {z}, m64" xed="VMOVDQA32_XMMu32_MASKmskw_MEMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_mov_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Move</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <description>Move packed 32-bit integers from "a" into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := a[i+31:i]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VMOVDQA32" form="xmm {z}, xmm" xed="VMOVDQA32_XMMu32_MASKmskw_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_load_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Load</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m256i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="void const*" varname="mem_addr" etype="UI64" memwidth="256"/>
+ <description>Load packed 64-bit integers from memory into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+ "mem_addr" must be aligned on a 32-byte boundary or a general-protection exception may be generated.</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := MEM[mem_addr+i+63:mem_addr+i]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VMOVDQA64" form="ymm {k}, m64" xed="VMOVDQA64_YMMu64_MASKmskw_MEMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_mov_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Move</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m256i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <description>Move packed 64-bit integers from "a" to "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := a[i+63:i]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VMOVDQA64" form="ymm {k}, ymm" xed="VMOVDQA64_YMMu64_MASKmskw_YMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_store_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="mem_addr" etype="UI64" memwidth="256"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <description>Store packed 64-bit integers from "a" into memory using writemask "k".
+ "mem_addr" must be aligned on a 32-byte boundary or a general-protection exception may be generated.</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ MEM[mem_addr+i+63:mem_addr+i] := a[i+63:i]
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VMOVDQA64" form="m256 {k}, ymm" xed="VMOVDQA64_MEMu64_MASKmskw_YMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_load_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Load</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="void const*" varname="mem_addr" etype="UI64" memwidth="256"/>
+ <description>Load packed 64-bit integers from memory into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+ "mem_addr" must be aligned on a 32-byte boundary or a general-protection exception may be generated.</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := MEM[mem_addr+i+63:mem_addr+i]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VMOVDQA64" form="ymm {z}, m64" xed="VMOVDQA64_YMMu64_MASKmskw_MEMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_mov_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Move</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <description>Move packed 64-bit integers from "a" into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := a[i+63:i]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VMOVDQA64" form="ymm {z}, ymm" xed="VMOVDQA64_YMMu64_MASKmskw_YMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_load_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Load</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m128i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="void const*" varname="mem_addr" etype="UI64" memwidth="128"/>
+ <description>Load packed 64-bit integers from memory into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+ "mem_addr" must be aligned on a 16-byte boundary or a general-protection exception may be generated.</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := MEM[mem_addr+i+63:mem_addr+i]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VMOVDQA64" form="xmm {k}, m64" xed="VMOVDQA64_XMMu64_MASKmskw_MEMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_mov_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Move</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m128i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <description>Move packed 64-bit integers from "a" to "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := a[i+63:i]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VMOVDQA64" form="xmm {k}, xmm" xed="VMOVDQA64_XMMu64_MASKmskw_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_store_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="mem_addr" etype="UI64" memwidth="128"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <description>Store packed 64-bit integers from "a" into memory using writemask "k".
+ "mem_addr" must be aligned on a 16-byte boundary or a general-protection exception may be generated.</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ MEM[mem_addr+i+63:mem_addr+i] := a[i+63:i]
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VMOVDQA64" form="m128 {k}, xmm" xed="VMOVDQA64_MEMu64_MASKmskw_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_load_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Load</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="void const*" varname="mem_addr" etype="UI64" memwidth="128"/>
+ <description>Load packed 64-bit integers from memory into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+ "mem_addr" must be aligned on a 16-byte boundary or a general-protection exception may be generated.</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := MEM[mem_addr+i+63:mem_addr+i]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VMOVDQA64" form="xmm {z}, m64" xed="VMOVDQA64_XMMu64_MASKmskw_MEMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_mov_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Move</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <description>Move packed 64-bit integers from "a" into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := a[i+63:i]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VMOVDQA64" form="xmm {z}, xmm" xed="VMOVDQA64_XMMu64_MASKmskw_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_loadu_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Load</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="void const*" varname="mem_addr" etype="UI32" memwidth="256"/>
+ <description>Load packed 32-bit integers from memory into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+ "mem_addr" does not need to be aligned on any particular boundary.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := MEM[mem_addr+i+31:mem_addr+i]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VMOVDQU32" form="ymm {k}, m64" xed="VMOVDQU32_YMMu32_MASKmskw_MEMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_storeu_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="mem_addr" etype="UI32" memwidth="256"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <description>Store packed 32-bit integers from "a" into memory using writemask "k".
+ "mem_addr" does not need to be aligned on any particular boundary.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ MEM[mem_addr+i+31:mem_addr+i] := a[i+31:i]
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VMOVDQU32" form="m256 {k}, ymm" xed="VMOVDQU32_MEMu32_MASKmskw_YMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_loadu_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Load</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="void const*" varname="mem_addr" etype="UI32" memwidth="256"/>
+ <description>Load packed 32-bit integers from memory into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+ "mem_addr" does not need to be aligned on any particular boundary.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := MEM[mem_addr+i+31:mem_addr+i]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VMOVDQU32" form="ymm {z}, m64" xed="VMOVDQU32_YMMu32_MASKmskw_MEMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_loadu_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Load</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="void const*" varname="mem_addr" etype="UI32" memwidth="128"/>
+ <description>Load packed 32-bit integers from memory into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+ "mem_addr" does not need to be aligned on any particular boundary.</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := MEM[mem_addr+i+31:mem_addr+i]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VMOVDQU32" form="xmm {k}, m64" xed="VMOVDQU32_XMMu32_MASKmskw_MEMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_storeu_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="mem_addr" etype="UI32" memwidth="128"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <description>Store packed 32-bit integers from "a" into memory using writemask "k".
+ "mem_addr" does not need to be aligned on any particular boundary.</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ MEM[mem_addr+i+31:mem_addr+i] := a[i+31:i]
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VMOVDQU32" form="m128 {k}, xmm" xed="VMOVDQU32_MEMu32_MASKmskw_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_loadu_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Load</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="void const*" varname="mem_addr" etype="UI32" memwidth="128"/>
+ <description>Load packed 32-bit integers from memory into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+ "mem_addr" does not need to be aligned on any particular boundary.</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := MEM[mem_addr+i+31:mem_addr+i]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VMOVDQU32" form="xmm {z}, m64" xed="VMOVDQU32_XMMu32_MASKmskw_MEMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_loadu_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Load</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m256i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="void const*" varname="mem_addr" etype="UI64" memwidth="256"/>
+ <description>Load packed 64-bit integers from memory into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+ "mem_addr" does not need to be aligned on any particular boundary.</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := MEM[mem_addr+i+63:mem_addr+i]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VMOVDQU64" form="ymm {k}, m64" xed="VMOVDQU64_YMMu64_MASKmskw_MEMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_storeu_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="mem_addr" etype="UI64" memwidth="256"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <description>Store packed 64-bit integers from "a" into memory using writemask "k".
+ "mem_addr" does not need to be aligned on any particular boundary.</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ MEM[mem_addr+i+63:mem_addr+i] := a[i+63:i]
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VMOVDQU64" form="m256 {k}, ymm" xed="VMOVDQU64_MEMu64_MASKmskw_YMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_loadu_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Load</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="void const*" varname="mem_addr" etype="UI64" memwidth="256"/>
+ <description>Load packed 64-bit integers from memory into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+ "mem_addr" does not need to be aligned on any particular boundary.</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := MEM[mem_addr+i+63:mem_addr+i]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VMOVDQU64" form="ymm {z}, m64" xed="VMOVDQU64_YMMu64_MASKmskw_MEMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_loadu_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Load</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m128i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="void const*" varname="mem_addr" etype="UI64" memwidth="128"/>
+ <description>Load packed 64-bit integers from memory into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+ "mem_addr" does not need to be aligned on any particular boundary.</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := MEM[mem_addr+i+63:mem_addr+i]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VMOVDQU64" form="xmm {k}, m64" xed="VMOVDQU64_XMMu64_MASKmskw_MEMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_storeu_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="mem_addr" etype="UI64" memwidth="128"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <description>Store packed 64-bit integers from "a" into memory using writemask "k".
+ "mem_addr" does not need to be aligned on any particular boundary.</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ MEM[mem_addr+i+63:mem_addr+i] := a[i+63:i]
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VMOVDQU64" form="m128 {k}, xmm" xed="VMOVDQU64_MEMu64_MASKmskw_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_loadu_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Load</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="void const*" varname="mem_addr" etype="UI64" memwidth="128"/>
+ <description>Load packed 64-bit integers from memory into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+ "mem_addr" does not need to be aligned on any particular boundary.</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := MEM[mem_addr+i+63:mem_addr+i]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VMOVDQU64" form="xmm {z}, m64" xed="VMOVDQU64_XMMu64_MASKmskw_MEMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_movehdup_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Move</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <description>Duplicate odd-indexed single-precision (32-bit) floating-point elements from "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+tmp[31:0] := a[63:32]
+tmp[63:32] := a[63:32]
+tmp[95:64] := a[127:96]
+tmp[127:96] := a[127:96]
+tmp[159:128] := a[191:160]
+tmp[191:160] := a[191:160]
+tmp[223:192] := a[255:224]
+tmp[255:224] := a[255:224]
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := tmp[i+31:i]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VMOVSHDUP" form="ymm {k}, ymm" xed="VMOVSHDUP_YMMf32_MASKmskw_YMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_movehdup_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Move</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <description>Duplicate odd-indexed single-precision (32-bit) floating-point elements from "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+tmp[31:0] := a[63:32]
+tmp[63:32] := a[63:32]
+tmp[95:64] := a[127:96]
+tmp[127:96] := a[127:96]
+tmp[159:128] := a[191:160]
+tmp[191:160] := a[191:160]
+tmp[223:192] := a[255:224]
+tmp[255:224] := a[255:224]
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := tmp[i+31:i]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VMOVSHDUP" form="ymm {z}, ymm" xed="VMOVSHDUP_YMMf32_MASKmskw_YMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_movehdup_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Move</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Duplicate odd-indexed single-precision (32-bit) floating-point elements from "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+tmp[31:0] := a[63:32]
+tmp[63:32] := a[63:32]
+tmp[95:64] := a[127:96]
+tmp[127:96] := a[127:96]
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := tmp[i+31:i]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VMOVSHDUP" form="xmm {k}, xmm" xed="VMOVSHDUP_XMMf32_MASKmskw_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_movehdup_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Move</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Duplicate odd-indexed single-precision (32-bit) floating-point elements from "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+tmp[31:0] := a[63:32]
+tmp[63:32] := a[63:32]
+tmp[95:64] := a[127:96]
+tmp[127:96] := a[127:96]
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := tmp[i+31:i]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VMOVSHDUP" form="xmm {z}, xmm" xed="VMOVSHDUP_XMMf32_MASKmskw_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_moveldup_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Move</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <description>Duplicate even-indexed single-precision (32-bit) floating-point elements from "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+tmp[31:0] := a[31:0]
+tmp[63:32] := a[31:0]
+tmp[95:64] := a[95:64]
+tmp[127:96] := a[95:64]
+tmp[159:128] := a[159:128]
+tmp[191:160] := a[159:128]
+tmp[223:192] := a[223:192]
+tmp[255:224] := a[223:192]
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := tmp[i+31:i]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VMOVSLDUP" form="ymm {k}, ymm" xed="VMOVSLDUP_YMMf32_MASKmskw_YMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_moveldup_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Move</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <description>Duplicate even-indexed single-precision (32-bit) floating-point elements from "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+tmp[31:0] := a[31:0]
+tmp[63:32] := a[31:0]
+tmp[95:64] := a[95:64]
+tmp[127:96] := a[95:64]
+tmp[159:128] := a[159:128]
+tmp[191:160] := a[159:128]
+tmp[223:192] := a[223:192]
+tmp[255:224] := a[223:192]
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := tmp[i+31:i]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VMOVSLDUP" form="ymm {z}, ymm" xed="VMOVSLDUP_YMMf32_MASKmskw_YMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_moveldup_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Move</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Duplicate even-indexed single-precision (32-bit) floating-point elements from "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+tmp[31:0] := a[31:0]
+tmp[63:32] := a[31:0]
+tmp[95:64] := a[95:64]
+tmp[127:96] := a[95:64]
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := tmp[i+31:i]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VMOVSLDUP" form="xmm {k}, xmm" xed="VMOVSLDUP_XMMf32_MASKmskw_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_moveldup_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Move</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Duplicate even-indexed single-precision (32-bit) floating-point elements from "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+tmp[31:0] := a[31:0]
+tmp[63:32] := a[31:0]
+tmp[95:64] := a[95:64]
+tmp[127:96] := a[95:64]
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := tmp[i+31:i]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VMOVSLDUP" form="xmm {z}, xmm" xed="VMOVSLDUP_XMMf32_MASKmskw_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_loadu_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Load</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="void const*" varname="mem_addr" etype="FP64" memwidth="256"/>
+ <description>Load packed double-precision (64-bit) floating-point elements from memory into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+ "mem_addr" does not need to be aligned on any particular boundary.</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := MEM[mem_addr+i+63:mem_addr+i]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VMOVUPD" form="ymm {k}, m64" xed="VMOVUPD_YMMf64_MASKmskw_MEMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_storeu_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="mem_addr" etype="FP64" memwidth="256"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <description>Store packed double-precision (64-bit) floating-point elements from "a" into memory using writemask "k".
+ "mem_addr" does not need to be aligned on any particular boundary.</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ MEM[mem_addr+i+63:mem_addr+i] := a[i+63:i]
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VMOVUPD" form="m256 {k}, ymm" xed="VMOVUPD_MEMf64_MASKmskw_YMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_loadu_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Load</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="void const*" varname="mem_addr" etype="FP64" memwidth="256"/>
+ <description>Load packed double-precision (64-bit) floating-point elements from memory into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+ "mem_addr" does not need to be aligned on any particular boundary.</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := MEM[mem_addr+i+63:mem_addr+i]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VMOVUPD" form="ymm {z}, m64" xed="VMOVUPD_YMMf64_MASKmskw_MEMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_loadu_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Load</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="void const*" varname="mem_addr" etype="FP64" memwidth="128"/>
+ <description>Load packed double-precision (64-bit) floating-point elements from memory into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+ "mem_addr" does not need to be aligned on any particular boundary.</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := MEM[mem_addr+i+63:mem_addr+i]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VMOVUPD" form="xmm {k}, m64" xed="VMOVUPD_XMMf64_MASKmskw_MEMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_storeu_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="mem_addr" etype="FP64" memwidth="128"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Store packed double-precision (64-bit) floating-point elements from "a" into memory using writemask "k".
+ "mem_addr" does not need to be aligned on any particular boundary.</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ MEM[mem_addr+i+63:mem_addr+i] := a[i+63:i]
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VMOVUPD" form="m128 {k}, xmm" xed="VMOVUPD_MEMf64_MASKmskw_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_loadu_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Load</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="void const*" varname="mem_addr" etype="FP64" memwidth="128"/>
+ <description>Load packed double-precision (64-bit) floating-point elements from memory into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+ "mem_addr" does not need to be aligned on any particular boundary.</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := MEM[mem_addr+i+63:mem_addr+i]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VMOVUPD" form="xmm {z}, m64" xed="VMOVUPD_XMMf64_MASKmskw_MEMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_loadu_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Load</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="void const*" varname="mem_addr" etype="FP32" memwidth="256"/>
+ <description>Load packed single-precision (32-bit) floating-point elements from memory into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+ "mem_addr" does not need to be aligned on any particular boundary.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := MEM[mem_addr+i+31:mem_addr+i]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VMOVUPS" form="ymm {k}, m64" xed="VMOVUPS_YMMf32_MASKmskw_MEMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_storeu_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="mem_addr" etype="FP32" memwidth="256"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <description>Store packed single-precision (32-bit) floating-point elements from "a" into memory using writemask "k".
+ "mem_addr" does not need to be aligned on any particular boundary.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ MEM[mem_addr+i+31:mem_addr+i] := a[i+31:i]
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VMOVUPS" form="m256 {k}, ymm" xed="VMOVUPS_MEMf32_MASKmskw_YMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_loadu_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Load</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="void const*" varname="mem_addr" etype="FP32" memwidth="256"/>
+ <description>Load packed single-precision (32-bit) floating-point elements from memory into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+ "mem_addr" does not need to be aligned on any particular boundary.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := MEM[mem_addr+i+31:mem_addr+i]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VMOVUPS" form="ymm {z}, m64" xed="VMOVUPS_YMMf32_MASKmskw_MEMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_loadu_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Load</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="void const*" varname="mem_addr" etype="FP32" memwidth="128"/>
+ <description>Load packed single-precision (32-bit) floating-point elements from memory into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+ "mem_addr" does not need to be aligned on any particular boundary.</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := MEM[mem_addr+i+31:mem_addr+i]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VMOVUPS" form="xmm {k}, m64" xed="VMOVUPS_XMMf32_MASKmskw_MEMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_storeu_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="mem_addr" etype="FP32" memwidth="128"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Store packed single-precision (32-bit) floating-point elements from "a" into memory using writemask "k".
+ "mem_addr" does not need to be aligned on any particular boundary.</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ MEM[mem_addr+i+31:mem_addr+i] := a[i+31:i]
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VMOVUPS" form="m128 {k}, xmm" xed="VMOVUPS_MEMf32_MASKmskw_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_loadu_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Load</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="void const*" varname="mem_addr" etype="FP32" memwidth="128"/>
+ <description>Load packed single-precision (32-bit) floating-point elements from memory into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+ "mem_addr" does not need to be aligned on any particular boundary.</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := MEM[mem_addr+i+31:mem_addr+i]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VMOVUPS" form="xmm {z}, m64" xed="VMOVUPS_XMMf32_MASKmskw_MEMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_mul_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <parameter type="__m256d" varname="b" etype="FP64"/>
+ <description>Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := a[i+63:i] * b[i+63:i]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VMULPD" form="ymm {k}, ymm, ymm" xed="VMULPD_YMMf64_MASKmskw_YMMf64_YMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_mul_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <parameter type="__m256d" varname="b" etype="FP64"/>
+ <description>Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := a[i+63:i] * b[i+63:i]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VMULPD" form="ymm {z}, ymm, ymm" xed="VMULPD_YMMf64_MASKmskw_YMMf64_YMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_mul_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := a[i+63:i] * b[i+63:i]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VMULPD" form="xmm {k}, xmm, xmm" xed="VMULPD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_mul_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := a[i+63:i] * b[i+63:i]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VMULPD" form="xmm {z}, xmm, xmm" xed="VMULPD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_mul_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="__m256" varname="b" etype="FP32"/>
+ <description>Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := a[i+31:i] * b[i+31:i]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VMULPS" form="ymm {k}, ymm, ymm" xed="VMULPS_YMMf32_MASKmskw_YMMf32_YMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_mul_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="__m256" varname="b" etype="FP32"/>
+ <description>Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := a[i+31:i] * b[i+31:i]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VMULPS" form="ymm {z}, ymm, ymm" xed="VMULPS_YMMf32_MASKmskw_YMMf32_YMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_mul_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := a[i+31:i] * b[i+31:i]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VMULPS" form="xmm {k}, xmm, xmm" xed="VMULPS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_mul_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := a[i+31:i] * b[i+31:i]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VMULPS" form="xmm {z}, xmm, xmm" xed="VMULPS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_abs_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI32"/>
+ <description>Compute the absolute value of packed signed 32-bit integers in "a", and store the unsigned results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := ABS(a[i+31:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPABSD" form="ymm {k}, ymm" xed="VPABSD_YMMi32_MASKmskw_YMMi32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_abs_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI32"/>
+ <description>Compute the absolute value of packed signed 32-bit integers in "a", and store the unsigned results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := ABS(a[i+31:i])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPABSD" form="ymm {z}, ymm" xed="VPABSD_YMMi32_MASKmskw_YMMi32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_abs_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI32"/>
+ <description>Compute the absolute value of packed signed 32-bit integers in "a", and store the unsigned results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := ABS(a[i+31:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPABSD" form="xmm {k}, xmm" xed="VPABSD_XMMi32_MASKmskw_XMMi32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_abs_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI32"/>
+ <description>Compute the absolute value of packed signed 32-bit integers in "a", and store the unsigned results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := ABS(a[i+31:i])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPABSD" form="xmm {z}, xmm" xed="VPABSD_XMMi32_MASKmskw_XMMi32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_abs_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m256i" varname="a" etype="SI64"/>
+ <description>Compute the absolute value of packed signed 64-bit integers in "a", and store the unsigned results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ dst[i+63:i] := ABS(a[i+63:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPABSQ" form="ymm, ymm" xed="VPABSQ_YMMi64_MASKmskw_YMMi64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_abs_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m256i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI64"/>
+ <description>Compute the absolute value of packed signed 64-bit integers in "a", and store the unsigned results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := ABS(a[i+63:i])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPABSQ" form="ymm {k}, ymm" xed="VPABSQ_YMMi64_MASKmskw_YMMi64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_abs_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI64"/>
+ <description>Compute the absolute value of packed signed 64-bit integers in "a", and store the unsigned results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := ABS(a[i+63:i])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPABSQ" form="ymm {z}, ymm" xed="VPABSQ_YMMi64_MASKmskw_YMMi64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_abs_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m128i" varname="a" etype="SI64"/>
+ <description>Compute the absolute value of packed signed 64-bit integers in "a", and store the unsigned results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ dst[i+63:i] := ABS(a[i+63:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPABSQ" form="xmm, xmm" xed="VPABSQ_XMMi64_MASKmskw_XMMi64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_abs_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m128i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI64"/>
+ <description>Compute the absolute value of packed signed 64-bit integers in "a", and store the unsigned results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := ABS(a[i+63:i])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPABSQ" form="xmm {k}, xmm" xed="VPABSQ_XMMi64_MASKmskw_XMMi64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_abs_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI64"/>
+ <description>Compute the absolute value of packed signed 64-bit integers in "a", and store the unsigned results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := ABS(a[i+63:i])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPABSQ" form="xmm {z}, xmm" xed="VPABSQ_XMMi64_MASKmskw_XMMi64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_add_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="__m256i" varname="b" etype="UI32"/>
+ <description>Add packed 32-bit integers in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := a[i+31:i] + b[i+31:i]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPADDD" form="ymm {k}, ymm, ymm" xed="VPADDD_YMMu32_MASKmskw_YMMu32_YMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_add_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="__m256i" varname="b" etype="UI32"/>
+ <description>Add packed 32-bit integers in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := a[i+31:i] + b[i+31:i]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPADDD" form="ymm {z}, ymm, ymm" xed="VPADDD_YMMu32_MASKmskw_YMMu32_YMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_add_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="b" etype="UI32"/>
+ <description>Add packed 32-bit integers in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := a[i+31:i] + b[i+31:i]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPADDD" form="xmm {k}, xmm, xmm" xed="VPADDD_XMMu32_MASKmskw_XMMu32_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_add_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="b" etype="UI32"/>
+ <description>Add packed 32-bit integers in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := a[i+31:i] + b[i+31:i]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPADDD" form="xmm {z}, xmm, xmm" xed="VPADDD_XMMu32_MASKmskw_XMMu32_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_add_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m256i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="__m256i" varname="b" etype="UI64"/>
+ <description>Add packed 64-bit integers in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := a[i+63:i] + b[i+63:i]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPADDQ" form="ymm {k}, ymm, ymm" xed="VPADDQ_YMMu64_MASKmskw_YMMu64_YMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_add_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="__m256i" varname="b" etype="UI64"/>
+ <description>Add packed 64-bit integers in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := a[i+63:i] + b[i+63:i]
+ ELSE
+    dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPADDQ" form="ymm {z}, ymm, ymm" xed="VPADDQ_YMMu64_MASKmskw_YMMu64_YMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_add_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m128i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <parameter type="__m128i" varname="b" etype="UI64"/>
+ <description>Add packed 64-bit integers in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := a[i+63:i] + b[i+63:i]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPADDQ" form="xmm {k}, xmm, xmm" xed="VPADDQ_XMMu64_MASKmskw_XMMu64_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_add_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <parameter type="__m128i" varname="b" etype="UI64"/>
+ <description>Add packed 64-bit integers in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := a[i+63:i] + b[i+63:i]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPADDQ" form="xmm {z}, xmm, xmm" xed="VPADDQ_XMMu64_MASKmskw_XMMu64_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_and_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Logical</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="__m256i" varname="b" etype="UI32"/>
+ <description>Compute the bitwise AND of packed 32-bit integers in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := a[i+31:i] AND b[i+31:i]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPANDD" form="ymm {k}, ymm, ymm" xed="VPANDD_YMMu32_MASKmskw_YMMu32_YMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_and_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Logical</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="__m256i" varname="b" etype="UI32"/>
+ <description>Compute the bitwise AND of packed 32-bit integers in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := a[i+31:i] AND b[i+31:i]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPANDD" form="ymm {z}, ymm, ymm" xed="VPANDD_YMMu32_MASKmskw_YMMu32_YMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_and_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Logical</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="b" etype="UI32"/>
+ <description>Compute the bitwise AND of packed 32-bit integers in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := a[i+31:i] AND b[i+31:i]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPANDD" form="xmm {k}, xmm, xmm" xed="VPANDD_XMMu32_MASKmskw_XMMu32_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_and_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Logical</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="b" etype="UI32"/>
+ <description>Compute the bitwise AND of packed 32-bit integers in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := a[i+31:i] AND b[i+31:i]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPANDD" form="xmm {z}, xmm, xmm" xed="VPANDD_XMMu32_MASKmskw_XMMu32_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_andnot_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Logical</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="__m256i" varname="b" etype="UI32"/>
+ <description>Compute the bitwise NOT of packed 32-bit integers in "a" and then AND with "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := ((NOT a[i+31:i]) AND b[i+31:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPANDND" form="ymm {k}, ymm, ymm" xed="VPANDND_YMMu32_MASKmskw_YMMu32_YMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_andnot_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Logical</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="__m256i" varname="b" etype="UI32"/>
+ <description>Compute the bitwise NOT of packed 32-bit integers in "a" and then AND with "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := (NOT a[i+31:i]) AND b[i+31:i]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPANDND" form="ymm {z}, ymm, ymm" xed="VPANDND_YMMu32_MASKmskw_YMMu32_YMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_andnot_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Logical</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="b" etype="UI32"/>
+ <description>Compute the bitwise NOT of packed 32-bit integers in "a" and then AND with "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := ((NOT a[i+31:i]) AND b[i+31:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPANDND" form="xmm {k}, xmm, xmm" xed="VPANDND_XMMu32_MASKmskw_XMMu32_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_andnot_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Logical</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="b" etype="UI32"/>
+ <description>Compute the bitwise NOT of packed 32-bit integers in "a" and then AND with "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := (NOT a[i+31:i]) AND b[i+31:i]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPANDND" form="xmm {z}, xmm, xmm" xed="VPANDND_XMMu32_MASKmskw_XMMu32_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_andnot_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Logical</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m256i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="__m256i" varname="b" etype="UI64"/>
+ <description>Compute the bitwise NOT of packed 64-bit integers in "a" and then AND with "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := ((NOT a[i+63:i]) AND b[i+63:i])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPANDNQ" form="ymm {k}, ymm, ymm" xed="VPANDNQ_YMMu64_MASKmskw_YMMu64_YMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_andnot_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Logical</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="__m256i" varname="b" etype="UI64"/>
+ <description>Compute the bitwise NOT of packed 64-bit integers in "a" and then AND with "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := (NOT a[i+63:i]) AND b[i+63:i]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPANDNQ" form="ymm {z}, ymm, ymm" xed="VPANDNQ_YMMu64_MASKmskw_YMMu64_YMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_andnot_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Logical</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m128i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <parameter type="__m128i" varname="b" etype="UI64"/>
+ <description>Compute the bitwise NOT of packed 64-bit integers in "a" and then AND with "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := ((NOT a[i+63:i]) AND b[i+63:i])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPANDNQ" form="xmm {k}, xmm, xmm" xed="VPANDNQ_XMMu64_MASKmskw_XMMu64_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_andnot_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Logical</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <parameter type="__m128i" varname="b" etype="UI64"/>
+ <description>Compute the bitwise NOT of packed 64-bit integers in "a" and then AND with "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := (NOT a[i+63:i]) AND b[i+63:i]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPANDNQ" form="xmm {z}, xmm, xmm" xed="VPANDNQ_XMMu64_MASKmskw_XMMu64_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_and_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Logical</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m256i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="__m256i" varname="b" etype="UI64"/>
+ <description>Compute the bitwise AND of packed 64-bit integers in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := a[i+63:i] AND b[i+63:i]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPANDQ" form="ymm {k}, ymm, ymm" xed="VPANDQ_YMMu64_MASKmskw_YMMu64_YMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_and_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Logical</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="__m256i" varname="b" etype="UI64"/>
+ <description>Compute the bitwise AND of packed 64-bit integers in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := a[i+63:i] AND b[i+63:i]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPANDQ" form="ymm {z}, ymm, ymm" xed="VPANDQ_YMMu64_MASKmskw_YMMu64_YMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_and_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Logical</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m128i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <parameter type="__m128i" varname="b" etype="UI64"/>
+ <description>Compute the bitwise AND of packed 64-bit integers in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := a[i+63:i] AND b[i+63:i]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPANDQ" form="xmm {k}, xmm, xmm" xed="VPANDQ_XMMu64_MASKmskw_XMMu64_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_and_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Logical</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <parameter type="__m128i" varname="b" etype="UI64"/>
+ <description>Compute the bitwise AND of packed 64-bit integers in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := a[i+63:i] AND b[i+63:i]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPANDQ" form="xmm {z}, xmm, xmm" xed="VPANDQ_XMMu64_MASKmskw_XMMu64_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_blend_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="__m256i" varname="b" etype="UI32"/>
+ <description>Blend packed 32-bit integers from "a" and "b" using control mask "k", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := b[i+31:i]
+ ELSE
+ dst[i+31:i] := a[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPBLENDMD" form="ymm {k}, ymm, ymm" xed="VPBLENDMD_YMMu32_MASKmskw_YMMu32_YMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_blend_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="b" etype="UI32"/>
+ <description>Blend packed 32-bit integers from "a" and "b" using control mask "k", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := b[i+31:i]
+ ELSE
+ dst[i+31:i] := a[i+31:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPBLENDMD" form="xmm {k}, xmm, xmm" xed="VPBLENDMD_XMMu32_MASKmskw_XMMu32_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_blend_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="__m256i" varname="b" etype="UI64"/>
+ <description>Blend packed 64-bit integers from "a" and "b" using control mask "k", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := b[i+63:i]
+ ELSE
+ dst[i+63:i] := a[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPBLENDMQ" form="ymm {k}, ymm, ymm" xed="VPBLENDMQ_YMMu64_MASKmskw_YMMu64_YMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_blend_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <parameter type="__m128i" varname="b" etype="UI64"/>
+ <description>Blend packed 64-bit integers from "a" and "b" using control mask "k", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := b[i+63:i]
+ ELSE
+ dst[i+63:i] := a[i+63:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPBLENDMQ" form="xmm {k}, xmm, xmm" xed="VPBLENDMQ_XMMu64_MASKmskw_XMMu64_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_broadcastd_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <description>Broadcast the low packed 32-bit integer from "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := a[31:0]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPBROADCASTD" form="ymm {k}, xmm" xed="VPBROADCASTD_YMMu32_MASKmskw_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_set1_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Set</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="int" varname="a" etype="UI32"/>
+ <description>Broadcast 32-bit integer "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := a[31:0]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPBROADCASTD" form="ymm {k}, r32" xed="VPBROADCASTD_YMMu32_MASKmskw_GPR32u32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_broadcastd_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <description>Broadcast the low packed 32-bit integer from "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := a[31:0]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPBROADCASTD" form="ymm {z}, xmm" xed="VPBROADCASTD_YMMu32_MASKmskw_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_set1_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Set</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="int" varname="a" etype="UI32"/>
+ <description>Broadcast 32-bit integer "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := a[31:0]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPBROADCASTD" form="ymm {z}, r32" xed="VPBROADCASTD_YMMu32_MASKmskw_GPR32u32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_broadcastd_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <description>Broadcast the low packed 32-bit integer from "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := a[31:0]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPBROADCASTD" form="xmm {k}, xmm" xed="VPBROADCASTD_XMMu32_MASKmskw_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_set1_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Set</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="int" varname="a" etype="UI32"/>
+ <description>Broadcast 32-bit integer "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := a[31:0]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPBROADCASTD" form="xmm {k}, r32" xed="VPBROADCASTD_XMMu32_MASKmskw_GPR32u32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_broadcastd_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <description>Broadcast the low packed 32-bit integer from "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := a[31:0]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPBROADCASTD" form="xmm {z}, xmm" xed="VPBROADCASTD_XMMu32_MASKmskw_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_set1_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Set</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="int" varname="a" etype="UI32"/>
+ <description>Broadcast 32-bit integer "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := a[31:0]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPBROADCASTD" form="xmm {z}, r32" xed="VPBROADCASTD_XMMu32_MASKmskw_GPR32u32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_broadcastq_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m256i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <description>Broadcast the low packed 64-bit integer from "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := a[63:0]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPBROADCASTQ" form="ymm {k}, xmm" xed="VPBROADCASTQ_YMMu64_MASKmskw_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_set1_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Set</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m256i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__int64" varname="a" etype="UI64"/>
+ <description>Broadcast 64-bit integer "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := a[63:0]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPBROADCASTQ" form="ymm {k}, r64" xed="VPBROADCASTQ_YMMu64_MASKmskw_GPR64u64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_broadcastq_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <description>Broadcast the low packed 64-bit integer from "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := a[63:0]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPBROADCASTQ" form="ymm {z}, xmm" xed="VPBROADCASTQ_YMMu64_MASKmskw_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_set1_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Set</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__int64" varname="a" etype="UI64"/>
+ <description>Broadcast 64-bit integer "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := a[63:0]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPBROADCASTQ" form="ymm {z}, r64" xed="VPBROADCASTQ_YMMu64_MASKmskw_GPR64u64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_broadcastq_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m128i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <description>Broadcast the low packed 64-bit integer from "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := a[63:0]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPBROADCASTQ" form="xmm {k}, xmm" xed="VPBROADCASTQ_XMMu64_MASKmskw_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_set1_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Set</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m128i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__int64" varname="a" etype="UI64"/>
+ <description>Broadcast 64-bit integer "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := a[63:0]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPBROADCASTQ" form="xmm {k}, r64" xed="VPBROADCASTQ_XMMu64_MASKmskw_GPR64u64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_broadcastq_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <description>Broadcast the low packed 64-bit integer from "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := a[63:0]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPBROADCASTQ" form="xmm {z}, xmm" xed="VPBROADCASTQ_XMMu64_MASKmskw_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_set1_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Set</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__int64" varname="a" etype="UI64"/>
+ <description>Broadcast 64-bit integer "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := a[63:0]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPBROADCASTQ" form="xmm {z}, r64" xed="VPBROADCASTQ_XMMu64_MASKmskw_GPR64u64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_cmp_epi32_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI32"/>
+ <parameter type="__m256i" varname="b" etype="SI32"/>
+ <parameter type="_MM_CMPINT_ENUM" varname="imm8" etype="IMM" immtype="_MM_CMPINT"/>
+ <description>Compare packed signed 32-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k".</description>
+ <operation>CASE (imm8[2:0]) OF
+0: OP := _MM_CMPINT_EQ
+1: OP := _MM_CMPINT_LT
+2: OP := _MM_CMPINT_LE
+3: OP := _MM_CMPINT_FALSE
+4: OP := _MM_CMPINT_NE
+5: OP := _MM_CMPINT_NLT
+6: OP := _MM_CMPINT_NLE
+7: OP := _MM_CMPINT_TRUE
+ESAC
+FOR j := 0 to 7
+ i := j*32
+ k[j] := ( a[i+31:i] OP b[i+31:i] ) ? 1 : 0
+ENDFOR
+k[MAX:8] := 0
+ </operation>
+ <instruction name="VPCMPD" form="k, ymm, ymm, imm8" xed="VPCMPD_MASKmskw_MASKmskw_YMMi32_YMMi32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_cmpeq_epi32_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI32"/>
+ <parameter type="__m256i" varname="b" etype="SI32"/>
+ <description>Compare packed signed 32-bit integers in "a" and "b" for equality, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ k[j] := ( a[i+31:i] == b[i+31:i] ) ? 1 : 0
+ENDFOR
+k[MAX:8] := 0
+ </operation>
+ <instruction name="VPCMPD" form="k, ymm, ymm" xed="VPCMPD_MASKmskw_MASKmskw_YMMi32_YMMi32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_cmpge_epi32_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI32"/>
+ <parameter type="__m256i" varname="b" etype="SI32"/>
+ <description>Compare packed signed 32-bit integers in "a" and "b" for greater-than-or-equal, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ k[j] := ( a[i+31:i] &gt;= b[i+31:i] ) ? 1 : 0
+ENDFOR
+k[MAX:8] := 0
+ </operation>
+ <instruction name="VPCMPD" form="k, ymm, ymm" xed="VPCMPD_MASKmskw_MASKmskw_YMMi32_YMMi32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_cmpgt_epi32_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI32"/>
+ <parameter type="__m256i" varname="b" etype="SI32"/>
+ <description>Compare packed signed 32-bit integers in "a" and "b" for greater-than, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ k[j] := ( a[i+31:i] &gt; b[i+31:i] ) ? 1 : 0
+ENDFOR
+k[MAX:8] := 0
+ </operation>
+ <instruction name="VPCMPD" form="k, ymm, ymm" xed="VPCMPD_MASKmskw_MASKmskw_YMMi32_YMMi32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_cmple_epi32_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI32"/>
+ <parameter type="__m256i" varname="b" etype="SI32"/>
+ <description>Compare packed signed 32-bit integers in "a" and "b" for less-than-or-equal, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ k[j] := ( a[i+31:i] &lt;= b[i+31:i] ) ? 1 : 0
+ENDFOR
+k[MAX:8] := 0
+ </operation>
+ <instruction name="VPCMPD" form="k, ymm, ymm" xed="VPCMPD_MASKmskw_MASKmskw_YMMi32_YMMi32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_cmplt_epi32_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI32"/>
+ <parameter type="__m256i" varname="b" etype="SI32"/>
+ <description>Compare packed signed 32-bit integers in "a" and "b" for less-than, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ k[j] := ( a[i+31:i] &lt; b[i+31:i] ) ? 1 : 0
+ENDFOR
+k[MAX:8] := 0
+ </operation>
+ <instruction name="VPCMPD" form="k, ymm, ymm" xed="VPCMPD_MASKmskw_MASKmskw_YMMi32_YMMi32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_cmpneq_epi32_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI32"/>
+ <parameter type="__m256i" varname="b" etype="SI32"/>
+ <description>Compare packed signed 32-bit integers in "a" and "b" for not-equal, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ k[j] := ( a[i+31:i] != b[i+31:i] ) ? 1 : 0
+ENDFOR
+k[MAX:8] := 0
+ </operation>
+ <instruction name="VPCMPD" form="k, ymm, ymm" xed="VPCMPD_MASKmskw_MASKmskw_YMMi32_YMMi32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cmp_epi32_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__mmask8" varname="k1" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI32"/>
+ <parameter type="__m256i" varname="b" etype="SI32"/>
+ <parameter type="_MM_CMPINT_ENUM" varname="imm8" etype="IMM" immtype="_MM_CMPINT"/>
+ <description>Compare packed signed 32-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>CASE (imm8[2:0]) OF
+0: OP := _MM_CMPINT_EQ
+1: OP := _MM_CMPINT_LT
+2: OP := _MM_CMPINT_LE
+3: OP := _MM_CMPINT_FALSE
+4: OP := _MM_CMPINT_NE
+5: OP := _MM_CMPINT_NLT
+6: OP := _MM_CMPINT_NLE
+7: OP := _MM_CMPINT_TRUE
+ESAC
+FOR j := 0 to 7
+ i := j*32
+ IF k1[j]
+ k[j] := ( a[i+31:i] OP b[i+31:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:8] := 0
+ </operation>
+ <instruction name="VPCMPD" form="k {k}, ymm, ymm, imm8" xed="VPCMPD_MASKmskw_MASKmskw_YMMi32_YMMi32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cmpeq_epi32_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__mmask8" varname="k1" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI32"/>
+ <parameter type="__m256i" varname="b" etype="SI32"/>
+ <description>Compare packed signed 32-bit integers in "a" and "b" for equality, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k1[j]
+ k[j] := ( a[i+31:i] == b[i+31:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:8] := 0
+ </operation>
+ <instruction name="VPCMPD" form="k {k}, ymm, ymm" xed="VPCMPD_MASKmskw_MASKmskw_YMMi32_YMMi32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cmpge_epi32_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__mmask8" varname="k1" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI32"/>
+ <parameter type="__m256i" varname="b" etype="SI32"/>
+ <description>Compare packed signed 32-bit integers in "a" and "b" for greater-than-or-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k1[j]
+ k[j] := ( a[i+31:i] &gt;= b[i+31:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:8] := 0
+ </operation>
+ <instruction name="VPCMPD" form="k {k}, ymm, ymm" xed="VPCMPD_MASKmskw_MASKmskw_YMMi32_YMMi32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cmpgt_epi32_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__mmask8" varname="k1" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI32"/>
+ <parameter type="__m256i" varname="b" etype="SI32"/>
+ <description>Compare packed signed 32-bit integers in "a" and "b" for greater-than, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k1[j]
+ k[j] := ( a[i+31:i] &gt; b[i+31:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:8] := 0
+ </operation>
+ <instruction name="VPCMPD" form="k {k}, ymm, ymm" xed="VPCMPD_MASKmskw_MASKmskw_YMMi32_YMMi32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cmple_epi32_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__mmask8" varname="k1" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI32"/>
+ <parameter type="__m256i" varname="b" etype="SI32"/>
+ <description>Compare packed signed 32-bit integers in "a" and "b" for less-than-or-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k1[j]
+ k[j] := ( a[i+31:i] &lt;= b[i+31:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:8] := 0
+ </operation>
+ <instruction name="VPCMPD" form="k {k}, ymm, ymm" xed="VPCMPD_MASKmskw_MASKmskw_YMMi32_YMMi32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cmplt_epi32_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__mmask8" varname="k1" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI32"/>
+ <parameter type="__m256i" varname="b" etype="SI32"/>
+ <description>Compare packed signed 32-bit integers in "a" and "b" for less-than, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k1[j]
+ k[j] := ( a[i+31:i] &lt; b[i+31:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:8] := 0
+ </operation>
+ <instruction name="VPCMPD" form="k {k}, ymm, ymm" xed="VPCMPD_MASKmskw_MASKmskw_YMMi32_YMMi32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cmpneq_epi32_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__mmask8" varname="k1" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI32"/>
+ <parameter type="__m256i" varname="b" etype="SI32"/>
+ <description>Compare packed signed 32-bit integers in "a" and "b" for not-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k1[j]
+ k[j] := ( a[i+31:i] != b[i+31:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:8] := 0
+ </operation>
+ <instruction name="VPCMPD" form="k {k}, ymm, ymm" xed="VPCMPD_MASKmskw_MASKmskw_YMMi32_YMMi32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cmp_epi32_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI32"/>
+ <parameter type="__m128i" varname="b" etype="SI32"/>
+ <parameter type="_MM_CMPINT_ENUM" varname="imm8" etype="IMM" immtype="_MM_CMPINT"/>
+ <description>Compare packed signed 32-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k".</description>
+ <operation>CASE (imm8[2:0]) OF
+0: OP := _MM_CMPINT_EQ
+1: OP := _MM_CMPINT_LT
+2: OP := _MM_CMPINT_LE
+3: OP := _MM_CMPINT_FALSE
+4: OP := _MM_CMPINT_NE
+5: OP := _MM_CMPINT_NLT
+6: OP := _MM_CMPINT_NLE
+7: OP := _MM_CMPINT_TRUE
+ESAC
+FOR j := 0 to 3
+ i := j*32
+ k[j] := ( a[i+31:i] OP b[i+31:i] ) ? 1 : 0
+ENDFOR
+k[MAX:4] := 0
+ </operation>
+ <instruction name="VPCMPD" form="k, xmm, xmm, imm8" xed="VPCMPD_MASKmskw_MASKmskw_XMMi32_XMMi32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cmpeq_epi32_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI32"/>
+ <parameter type="__m128i" varname="b" etype="SI32"/>
+ <description>Compare packed signed 32-bit integers in "a" and "b" for equality, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ k[j] := ( a[i+31:i] == b[i+31:i] ) ? 1 : 0
+ENDFOR
+k[MAX:4] := 0
+ </operation>
+ <instruction name="VPCMPD" form="k, xmm, xmm" xed="VPCMPD_MASKmskw_MASKmskw_XMMi32_XMMi32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cmpge_epi32_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI32"/>
+ <parameter type="__m128i" varname="b" etype="SI32"/>
+ <description>Compare packed signed 32-bit integers in "a" and "b" for greater-than-or-equal, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ k[j] := ( a[i+31:i] &gt;= b[i+31:i] ) ? 1 : 0
+ENDFOR
+k[MAX:4] := 0
+ </operation>
+ <instruction name="VPCMPD" form="k, xmm, xmm" xed="VPCMPD_MASKmskw_MASKmskw_XMMi32_XMMi32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cmpgt_epi32_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI32"/>
+ <parameter type="__m128i" varname="b" etype="SI32"/>
+ <description>Compare packed signed 32-bit integers in "a" and "b" for greater-than, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ k[j] := ( a[i+31:i] &gt; b[i+31:i] ) ? 1 : 0
+ENDFOR
+k[MAX:4] := 0
+ </operation>
+ <instruction name="VPCMPD" form="k, xmm, xmm" xed="VPCMPD_MASKmskw_MASKmskw_XMMi32_XMMi32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cmple_epi32_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI32"/>
+ <parameter type="__m128i" varname="b" etype="SI32"/>
+ <description>Compare packed signed 32-bit integers in "a" and "b" for less-than-or-equal, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ k[j] := ( a[i+31:i] &lt;= b[i+31:i] ) ? 1 : 0
+ENDFOR
+k[MAX:4] := 0
+ </operation>
+ <instruction name="VPCMPD" form="k, xmm, xmm" xed="VPCMPD_MASKmskw_MASKmskw_XMMi32_XMMi32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cmplt_epi32_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI32"/>
+ <parameter type="__m128i" varname="b" etype="SI32"/>
+ <description>Compare packed signed 32-bit integers in "a" and "b" for less-than, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ k[j] := ( a[i+31:i] &lt; b[i+31:i] ) ? 1 : 0
+ENDFOR
+k[MAX:4] := 0
+ </operation>
+ <instruction name="VPCMPD" form="k, xmm, xmm" xed="VPCMPD_MASKmskw_MASKmskw_XMMi32_XMMi32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cmpneq_epi32_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI32"/>
+ <parameter type="__m128i" varname="b" etype="SI32"/>
+ <description>Compare packed signed 32-bit integers in "a" and "b" for not-equal, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ k[j] := ( a[i+31:i] != b[i+31:i] ) ? 1 : 0
+ENDFOR
+k[MAX:4] := 0
+ </operation>
+ <instruction name="VPCMPD" form="k, xmm, xmm" xed="VPCMPD_MASKmskw_MASKmskw_XMMi32_XMMi32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cmp_epi32_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__mmask8" varname="k1" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI32"/>
+ <parameter type="__m128i" varname="b" etype="SI32"/>
+ <parameter type="_MM_CMPINT_ENUM" varname="imm8" etype="IMM" immtype="_MM_CMPINT"/>
+ <description>Compare packed signed 32-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>CASE (imm8[2:0]) OF
+0: OP := _MM_CMPINT_EQ
+1: OP := _MM_CMPINT_LT
+2: OP := _MM_CMPINT_LE
+3: OP := _MM_CMPINT_FALSE
+4: OP := _MM_CMPINT_NE
+5: OP := _MM_CMPINT_NLT
+6: OP := _MM_CMPINT_NLE
+7: OP := _MM_CMPINT_TRUE
+ESAC
+FOR j := 0 to 3
+ i := j*32
+ IF k1[j]
+ k[j] := ( a[i+31:i] OP b[i+31:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:4] := 0
+ </operation>
+ <instruction name="VPCMPD" form="k {k}, xmm, xmm, imm8" xed="VPCMPD_MASKmskw_MASKmskw_XMMi32_XMMi32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cmpeq_epi32_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__mmask8" varname="k1" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI32"/>
+ <parameter type="__m128i" varname="b" etype="SI32"/>
+ <description>Compare packed signed 32-bit integers in "a" and "b" for equality, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k1[j]
+ k[j] := ( a[i+31:i] == b[i+31:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:4] := 0
+ </operation>
+ <instruction name="VPCMPD" form="k {k}, xmm, xmm" xed="VPCMPD_MASKmskw_MASKmskw_XMMi32_XMMi32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cmpge_epi32_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__mmask8" varname="k1" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI32"/>
+ <parameter type="__m128i" varname="b" etype="SI32"/>
+ <description>Compare packed signed 32-bit integers in "a" and "b" for greater-than-or-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k1[j]
+ k[j] := ( a[i+31:i] &gt;= b[i+31:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:4] := 0
+ </operation>
+ <instruction name="VPCMPD" form="k {k}, xmm, xmm" xed="VPCMPD_MASKmskw_MASKmskw_XMMi32_XMMi32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cmpgt_epi32_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__mmask8" varname="k1" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI32"/>
+ <parameter type="__m128i" varname="b" etype="SI32"/>
+ <description>Compare packed signed 32-bit integers in "a" and "b" for greater-than, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k1[j]
+ k[j] := ( a[i+31:i] &gt; b[i+31:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:4] := 0
+ </operation>
+ <instruction name="VPCMPD" form="k {k}, xmm, xmm" xed="VPCMPD_MASKmskw_MASKmskw_XMMi32_XMMi32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cmple_epi32_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__mmask8" varname="k1" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI32"/>
+ <parameter type="__m128i" varname="b" etype="SI32"/>
+ <description>Compare packed signed 32-bit integers in "a" and "b" for less-than-or-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k1[j]
+ k[j] := ( a[i+31:i] &lt;= b[i+31:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:4] := 0
+ </operation>
+ <instruction name="VPCMPD" form="k {k}, xmm, xmm" xed="VPCMPD_MASKmskw_MASKmskw_XMMi32_XMMi32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cmplt_epi32_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__mmask8" varname="k1" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI32"/>
+ <parameter type="__m128i" varname="b" etype="SI32"/>
+ <description>Compare packed signed 32-bit integers in "a" and "b" for less-than, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k1[j]
+ k[j] := ( a[i+31:i] &lt; b[i+31:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:4] := 0
+ </operation>
+ <instruction name="VPCMPD" form="k {k}, xmm, xmm" xed="VPCMPD_MASKmskw_MASKmskw_XMMi32_XMMi32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cmpneq_epi32_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__mmask8" varname="k1" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI32"/>
+ <parameter type="__m128i" varname="b" etype="SI32"/>
+ <description>Compare packed signed 32-bit integers in "a" and "b" for not-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k1[j]
+ k[j] := ( a[i+31:i] != b[i+31:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:4] := 0
+ </operation>
+ <instruction name="VPCMPD" form="k {k}, xmm, xmm" xed="VPCMPD_MASKmskw_MASKmskw_XMMi32_XMMi32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_cmp_epi64_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI64"/>
+ <parameter type="__m256i" varname="b" etype="SI64"/>
+ <parameter type="_MM_CMPINT_ENUM" varname="imm8" etype="IMM" immtype="_MM_CMPINT"/>
+ <description>Compare packed signed 64-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k".</description>
+ <operation>CASE (imm8[2:0]) OF
+0: OP := _MM_CMPINT_EQ
+1: OP := _MM_CMPINT_LT
+2: OP := _MM_CMPINT_LE
+3: OP := _MM_CMPINT_FALSE
+4: OP := _MM_CMPINT_NE
+5: OP := _MM_CMPINT_NLT
+6: OP := _MM_CMPINT_NLE
+7: OP := _MM_CMPINT_TRUE
+ESAC
+FOR j := 0 to 3
+ i := j*64
+ k[j] := ( a[i+63:i] OP b[i+63:i] ) ? 1 : 0
+ENDFOR
+k[MAX:4] := 0
+ </operation>
+ <instruction name="VPCMPQ" form="k, ymm, ymm, imm8" xed="VPCMPQ_MASKmskw_MASKmskw_YMMi64_YMMi64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_cmpeq_epi64_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI64"/>
+ <parameter type="__m256i" varname="b" etype="SI64"/>
+ <description>Compare packed signed 64-bit integers in "a" and "b" for equality, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ k[j] := ( a[i+63:i] == b[i+63:i] ) ? 1 : 0
+ENDFOR
+k[MAX:4] := 0
+ </operation>
+ <instruction name="VPCMPQ" form="k, ymm, ymm" xed="VPCMPQ_MASKmskw_MASKmskw_YMMi64_YMMi64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_cmpge_epi64_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI64"/>
+ <parameter type="__m256i" varname="b" etype="SI64"/>
+ <description>Compare packed signed 64-bit integers in "a" and "b" for greater-than-or-equal, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ k[j] := ( a[i+63:i] &gt;= b[i+63:i] ) ? 1 : 0
+ENDFOR
+k[MAX:4] := 0
+ </operation>
+ <instruction name="VPCMPQ" form="k, ymm, ymm" xed="VPCMPQ_MASKmskw_MASKmskw_YMMi64_YMMi64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_cmpgt_epi64_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI64"/>
+ <parameter type="__m256i" varname="b" etype="SI64"/>
+ <description>Compare packed signed 64-bit integers in "a" and "b" for greater-than, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ k[j] := ( a[i+63:i] &gt; b[i+63:i] ) ? 1 : 0
+ENDFOR
+k[MAX:4] := 0
+ </operation>
+ <instruction name="VPCMPQ" form="k, ymm, ymm" xed="VPCMPQ_MASKmskw_MASKmskw_YMMi64_YMMi64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_cmple_epi64_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI64"/>
+ <parameter type="__m256i" varname="b" etype="SI64"/>
+ <description>Compare packed signed 64-bit integers in "a" and "b" for less-than-or-equal, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ k[j] := ( a[i+63:i] &lt;= b[i+63:i] ) ? 1 : 0
+ENDFOR
+k[MAX:4] := 0
+ </operation>
+ <instruction name="VPCMPQ" form="k, ymm, ymm" xed="VPCMPQ_MASKmskw_MASKmskw_YMMi64_YMMi64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_cmplt_epi64_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI64"/>
+ <parameter type="__m256i" varname="b" etype="SI64"/>
+ <description>Compare packed signed 64-bit integers in "a" and "b" for less-than, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ k[j] := ( a[i+63:i] &lt; b[i+63:i] ) ? 1 : 0
+ENDFOR
+k[MAX:4] := 0
+ </operation>
+ <instruction name="VPCMPQ" form="k, ymm, ymm" xed="VPCMPQ_MASKmskw_MASKmskw_YMMi64_YMMi64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_cmpneq_epi64_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI64"/>
+ <parameter type="__m256i" varname="b" etype="SI64"/>
+ <description>Compare packed signed 64-bit integers in "a" and "b" for not-equal, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ k[j] := ( a[i+63:i] != b[i+63:i] ) ? 1 : 0
+ENDFOR
+k[MAX:4] := 0
+ </operation>
+ <instruction name="VPCMPQ" form="k, ymm, ymm" xed="VPCMPQ_MASKmskw_MASKmskw_YMMi64_YMMi64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cmp_epi64_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__mmask8" varname="k1" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI64"/>
+ <parameter type="__m256i" varname="b" etype="SI64"/>
+ <parameter type="_MM_CMPINT_ENUM" varname="imm8" etype="IMM" immtype="_MM_CMPINT"/>
+ <description>Compare packed signed 64-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>CASE (imm8[2:0]) OF
+0: OP := _MM_CMPINT_EQ
+1: OP := _MM_CMPINT_LT
+2: OP := _MM_CMPINT_LE
+3: OP := _MM_CMPINT_FALSE
+4: OP := _MM_CMPINT_NE
+5: OP := _MM_CMPINT_NLT
+6: OP := _MM_CMPINT_NLE
+7: OP := _MM_CMPINT_TRUE
+ESAC
+FOR j := 0 to 3
+ i := j*64
+ IF k1[j]
+ k[j] := ( a[i+63:i] OP b[i+63:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:4] := 0
+ </operation>
+ <instruction name="VPCMPQ" form="k {k}, ymm, ymm, imm8" xed="VPCMPQ_MASKmskw_MASKmskw_YMMi64_YMMi64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cmpeq_epi64_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__mmask8" varname="k1" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI64"/>
+ <parameter type="__m256i" varname="b" etype="SI64"/>
+ <description>Compare packed signed 64-bit integers in "a" and "b" for equality, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k1[j]
+ k[j] := ( a[i+63:i] == b[i+63:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:4] := 0
+ </operation>
+ <instruction name="VPCMPQ" form="k {k}, ymm, ymm" xed="VPCMPQ_MASKmskw_MASKmskw_YMMi64_YMMi64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cmpge_epi64_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__mmask8" varname="k1" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI64"/>
+ <parameter type="__m256i" varname="b" etype="SI64"/>
+ <description>Compare packed signed 64-bit integers in "a" and "b" for greater-than-or-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k1[j]
+ k[j] := ( a[i+63:i] &gt;= b[i+63:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:4] := 0
+ </operation>
+ <instruction name="VPCMPQ" form="k {k}, ymm, ymm" xed="VPCMPQ_MASKmskw_MASKmskw_YMMi64_YMMi64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cmpgt_epi64_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__mmask8" varname="k1" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI64"/>
+ <parameter type="__m256i" varname="b" etype="SI64"/>
+ <description>Compare packed signed 64-bit integers in "a" and "b" for greater-than, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k1[j]
+ k[j] := ( a[i+63:i] &gt; b[i+63:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:4] := 0
+ </operation>
+ <instruction name="VPCMPQ" form="k {k}, ymm, ymm" xed="VPCMPQ_MASKmskw_MASKmskw_YMMi64_YMMi64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cmple_epi64_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__mmask8" varname="k1" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI64"/>
+ <parameter type="__m256i" varname="b" etype="SI64"/>
+ <description>Compare packed signed 64-bit integers in "a" and "b" for less-than-or-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k1[j]
+ k[j] := ( a[i+63:i] &lt;= b[i+63:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:4] := 0
+ </operation>
+ <instruction name="VPCMPQ" form="k {k}, ymm, ymm" xed="VPCMPQ_MASKmskw_MASKmskw_YMMi64_YMMi64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cmplt_epi64_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__mmask8" varname="k1" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI64"/>
+ <parameter type="__m256i" varname="b" etype="SI64"/>
+ <description>Compare packed signed 64-bit integers in "a" and "b" for less-than, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k1[j]
+ k[j] := ( a[i+63:i] &lt; b[i+63:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:4] := 0
+ </operation>
+ <instruction name="VPCMPQ" form="k {k}, ymm, ymm" xed="VPCMPQ_MASKmskw_MASKmskw_YMMi64_YMMi64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cmpneq_epi64_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__mmask8" varname="k1" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI64"/>
+ <parameter type="__m256i" varname="b" etype="SI64"/>
+ <description>Compare packed signed 64-bit integers in "a" and "b" for not-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k1[j]
+ k[j] := ( a[i+63:i] != b[i+63:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:4] := 0
+ </operation>
+ <instruction name="VPCMPQ" form="k {k}, ymm, ymm" xed="VPCMPQ_MASKmskw_MASKmskw_YMMi64_YMMi64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cmp_epi64_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI64"/>
+ <parameter type="__m128i" varname="b" etype="SI64"/>
+ <parameter type="_MM_CMPINT_ENUM" varname="imm8" etype="IMM" immtype="_MM_CMPINT"/>
+ <description>Compare packed signed 64-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k".</description>
+ <operation>CASE (imm8[2:0]) OF
+0: OP := _MM_CMPINT_EQ
+1: OP := _MM_CMPINT_LT
+2: OP := _MM_CMPINT_LE
+3: OP := _MM_CMPINT_FALSE
+4: OP := _MM_CMPINT_NE
+5: OP := _MM_CMPINT_NLT
+6: OP := _MM_CMPINT_NLE
+7: OP := _MM_CMPINT_TRUE
+ESAC
+FOR j := 0 to 1
+ i := j*64
+ k[j] := ( a[i+63:i] OP b[i+63:i] ) ? 1 : 0
+ENDFOR
+k[MAX:2] := 0
+ </operation>
+ <instruction name="VPCMPQ" form="k, xmm, xmm, imm8" xed="VPCMPQ_MASKmskw_MASKmskw_XMMi64_XMMi64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cmpeq_epi64_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI64"/>
+ <parameter type="__m128i" varname="b" etype="SI64"/>
+ <description>Compare packed signed 64-bit integers in "a" and "b" for equality, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ k[j] := ( a[i+63:i] == b[i+63:i] ) ? 1 : 0
+ENDFOR
+k[MAX:2] := 0
+ </operation>
+ <instruction name="VPCMPQ" form="k, xmm, xmm" xed="VPCMPQ_MASKmskw_MASKmskw_XMMi64_XMMi64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cmpge_epi64_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI64"/>
+ <parameter type="__m128i" varname="b" etype="SI64"/>
+ <description>Compare packed signed 64-bit integers in "a" and "b" for greater-than-or-equal, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ k[j] := ( a[i+63:i] &gt;= b[i+63:i] ) ? 1 : 0
+ENDFOR
+k[MAX:2] := 0
+ </operation>
+ <instruction name="VPCMPQ" form="k, xmm, xmm" xed="VPCMPQ_MASKmskw_MASKmskw_XMMi64_XMMi64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cmpgt_epi64_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI64"/>
+ <parameter type="__m128i" varname="b" etype="SI64"/>
+ <description>Compare packed signed 64-bit integers in "a" and "b" for greater-than, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ k[j] := ( a[i+63:i] &gt; b[i+63:i] ) ? 1 : 0
+ENDFOR
+k[MAX:2] := 0
+ </operation>
+ <instruction name="VPCMPQ" form="k, xmm, xmm" xed="VPCMPQ_MASKmskw_MASKmskw_XMMi64_XMMi64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cmple_epi64_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI64"/>
+ <parameter type="__m128i" varname="b" etype="SI64"/>
+ <description>Compare packed signed 64-bit integers in "a" and "b" for less-than-or-equal, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ k[j] := ( a[i+63:i] &lt;= b[i+63:i] ) ? 1 : 0
+ENDFOR
+k[MAX:2] := 0
+ </operation>
+ <instruction name="VPCMPQ" form="k, xmm, xmm" xed="VPCMPQ_MASKmskw_MASKmskw_XMMi64_XMMi64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cmplt_epi64_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI64"/>
+ <parameter type="__m128i" varname="b" etype="SI64"/>
+ <description>Compare packed signed 64-bit integers in "a" and "b" for less-than, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ k[j] := ( a[i+63:i] &lt; b[i+63:i] ) ? 1 : 0
+ENDFOR
+k[MAX:2] := 0
+ </operation>
+ <instruction name="VPCMPQ" form="k, xmm, xmm" xed="VPCMPQ_MASKmskw_MASKmskw_XMMi64_XMMi64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cmpneq_epi64_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI64"/>
+ <parameter type="__m128i" varname="b" etype="SI64"/>
+ <description>Compare packed signed 64-bit integers in "a" and "b" for not-equal, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ k[j] := ( a[i+63:i] != b[i+63:i] ) ? 1 : 0
+ENDFOR
+k[MAX:2] := 0
+ </operation>
+ <instruction name="VPCMPQ" form="k, xmm, xmm" xed="VPCMPQ_MASKmskw_MASKmskw_XMMi64_XMMi64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cmp_epi64_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__mmask8" varname="k1" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI64"/>
+ <parameter type="__m128i" varname="b" etype="SI64"/>
+ <parameter type="_MM_CMPINT_ENUM" varname="imm8" etype="IMM" immtype="_MM_CMPINT"/>
+ <description>Compare packed signed 64-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>CASE (imm8[2:0]) OF
+0: OP := _MM_CMPINT_EQ
+1: OP := _MM_CMPINT_LT
+2: OP := _MM_CMPINT_LE
+3: OP := _MM_CMPINT_FALSE
+4: OP := _MM_CMPINT_NE
+5: OP := _MM_CMPINT_NLT
+6: OP := _MM_CMPINT_NLE
+7: OP := _MM_CMPINT_TRUE
+ESAC
+FOR j := 0 to 1
+ i := j*64
+ IF k1[j]
+ k[j] := ( a[i+63:i] OP b[i+63:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:2] := 0
+ </operation>
+ <instruction name="VPCMPQ" form="k {k}, xmm, xmm, imm8" xed="VPCMPQ_MASKmskw_MASKmskw_XMMi64_XMMi64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cmpeq_epi64_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__mmask8" varname="k1" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI64"/>
+ <parameter type="__m128i" varname="b" etype="SI64"/>
+ <description>Compare packed signed 64-bit integers in "a" and "b" for equality, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k1[j]
+ k[j] := ( a[i+63:i] == b[i+63:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:2] := 0
+ </operation>
+ <instruction name="VPCMPQ" form="k {k}, xmm, xmm" xed="VPCMPQ_MASKmskw_MASKmskw_XMMi64_XMMi64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cmpge_epi64_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__mmask8" varname="k1" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI64"/>
+ <parameter type="__m128i" varname="b" etype="SI64"/>
+ <description>Compare packed signed 64-bit integers in "a" and "b" for greater-than-or-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k1[j]
+ k[j] := ( a[i+63:i] &gt;= b[i+63:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:2] := 0
+ </operation>
+ <instruction name="VPCMPQ" form="k {k}, xmm, xmm" xed="VPCMPQ_MASKmskw_MASKmskw_XMMi64_XMMi64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cmpgt_epi64_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__mmask8" varname="k1" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI64"/>
+ <parameter type="__m128i" varname="b" etype="SI64"/>
+ <description>Compare packed signed 64-bit integers in "a" and "b" for greater-than, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k1[j]
+ k[j] := ( a[i+63:i] &gt; b[i+63:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:2] := 0
+ </operation>
+ <instruction name="VPCMPQ" form="k {k}, xmm, xmm" xed="VPCMPQ_MASKmskw_MASKmskw_XMMi64_XMMi64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cmple_epi64_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__mmask8" varname="k1" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI64"/>
+ <parameter type="__m128i" varname="b" etype="SI64"/>
+ <description>Compare packed signed 64-bit integers in "a" and "b" for less-than-or-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k1[j]
+ k[j] := ( a[i+63:i] &lt;= b[i+63:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:2] := 0
+ </operation>
+ <instruction name="VPCMPQ" form="k {k}, xmm, xmm" xed="VPCMPQ_MASKmskw_MASKmskw_XMMi64_XMMi64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cmplt_epi64_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__mmask8" varname="k1" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI64"/>
+ <parameter type="__m128i" varname="b" etype="SI64"/>
+ <description>Compare packed signed 64-bit integers in "a" and "b" for less-than, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k1[j]
+ k[j] := ( a[i+63:i] &lt; b[i+63:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:2] := 0
+ </operation>
+ <instruction name="VPCMPQ" form="k {k}, xmm, xmm" xed="VPCMPQ_MASKmskw_MASKmskw_XMMi64_XMMi64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cmpneq_epi64_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__mmask8" varname="k1" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI64"/>
+ <parameter type="__m128i" varname="b" etype="SI64"/>
+ <description>Compare packed signed 64-bit integers in "a" and "b" for not-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k1[j]
+ k[j] := ( a[i+63:i] != b[i+63:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:2] := 0
+ </operation>
+ <instruction name="VPCMPQ" form="k {k}, xmm, xmm" xed="VPCMPQ_MASKmskw_MASKmskw_XMMi64_XMMi64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_cmp_epu32_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="__m256i" varname="b" etype="UI32"/>
+ <parameter type="_MM_CMPINT_ENUM" varname="imm8" etype="IMM" immtype="_MM_CMPINT"/>
+ <description>Compare packed unsigned 32-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k".</description>
+ <operation>CASE (imm8[2:0]) OF
+0: OP := _MM_CMPINT_EQ
+1: OP := _MM_CMPINT_LT
+2: OP := _MM_CMPINT_LE
+3: OP := _MM_CMPINT_FALSE
+4: OP := _MM_CMPINT_NE
+5: OP := _MM_CMPINT_NLT
+6: OP := _MM_CMPINT_NLE
+7: OP := _MM_CMPINT_TRUE
+ESAC
+FOR j := 0 to 7
+ i := j*32
+ k[j] := ( a[i+31:i] OP b[i+31:i] ) ? 1 : 0
+ENDFOR
+k[MAX:8] := 0
+ </operation>
+ <instruction name="VPCMPUD" form="k, ymm, ymm, imm8" xed="VPCMPUD_MASKmskw_MASKmskw_YMMu32_YMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_cmpeq_epu32_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="__m256i" varname="b" etype="UI32"/>
+ <description>Compare packed unsigned 32-bit integers in "a" and "b" for equality, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ k[j] := ( a[i+31:i] == b[i+31:i] ) ? 1 : 0
+ENDFOR
+k[MAX:8] := 0
+ </operation>
+ <instruction name="VPCMPUD" form="k, ymm, ymm" xed="VPCMPUD_MASKmskw_MASKmskw_YMMu32_YMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_cmpge_epu32_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="__m256i" varname="b" etype="UI32"/>
+ <description>Compare packed unsigned 32-bit integers in "a" and "b" for greater-than-or-equal, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ k[j] := ( a[i+31:i] &gt;= b[i+31:i] ) ? 1 : 0
+ENDFOR
+k[MAX:8] := 0
+ </operation>
+ <instruction name="VPCMPUD" form="k, ymm, ymm" xed="VPCMPUD_MASKmskw_MASKmskw_YMMu32_YMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_cmpgt_epu32_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="__m256i" varname="b" etype="UI32"/>
+ <description>Compare packed unsigned 32-bit integers in "a" and "b" for greater-than, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ k[j] := ( a[i+31:i] &gt; b[i+31:i] ) ? 1 : 0
+ENDFOR
+k[MAX:8] := 0
+ </operation>
+ <instruction name="VPCMPUD" form="k, ymm, ymm" xed="VPCMPUD_MASKmskw_MASKmskw_YMMu32_YMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_cmple_epu32_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="__m256i" varname="b" etype="UI32"/>
+ <description>Compare packed unsigned 32-bit integers in "a" and "b" for less-than-or-equal, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ k[j] := ( a[i+31:i] &lt;= b[i+31:i] ) ? 1 : 0
+ENDFOR
+k[MAX:8] := 0
+ </operation>
+ <instruction name="VPCMPUD" form="k, ymm, ymm" xed="VPCMPUD_MASKmskw_MASKmskw_YMMu32_YMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_cmplt_epu32_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="__m256i" varname="b" etype="UI32"/>
+ <description>Compare packed unsigned 32-bit integers in "a" and "b" for less-than, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ k[j] := ( a[i+31:i] &lt; b[i+31:i] ) ? 1 : 0
+ENDFOR
+k[MAX:8] := 0
+ </operation>
+ <instruction name="VPCMPUD" form="k, ymm, ymm" xed="VPCMPUD_MASKmskw_MASKmskw_YMMu32_YMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_cmpneq_epu32_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="__m256i" varname="b" etype="UI32"/>
+ <description>Compare packed unsigned 32-bit integers in "a" and "b" for not-equal, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ k[j] := ( a[i+31:i] != b[i+31:i] ) ? 1 : 0
+ENDFOR
+k[MAX:8] := 0
+ </operation>
+ <instruction name="VPCMPUD" form="k, ymm, ymm" xed="VPCMPUD_MASKmskw_MASKmskw_YMMu32_YMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cmp_epu32_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__mmask8" varname="k1" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="__m256i" varname="b" etype="UI32"/>
+ <parameter type="_MM_CMPINT_ENUM" varname="imm8" etype="IMM" immtype="_MM_CMPINT"/>
+ <description>Compare packed unsigned 32-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>CASE (imm8[2:0]) OF
+0: OP := _MM_CMPINT_EQ
+1: OP := _MM_CMPINT_LT
+2: OP := _MM_CMPINT_LE
+3: OP := _MM_CMPINT_FALSE
+4: OP := _MM_CMPINT_NE
+5: OP := _MM_CMPINT_NLT
+6: OP := _MM_CMPINT_NLE
+7: OP := _MM_CMPINT_TRUE
+ESAC
+FOR j := 0 to 7
+ i := j*32
+ IF k1[j]
+ k[j] := ( a[i+31:i] OP b[i+31:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:8] := 0
+ </operation>
+ <instruction name="VPCMPUD" form="k {k}, ymm, ymm, imm8" xed="VPCMPUD_MASKmskw_MASKmskw_YMMu32_YMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cmpeq_epu32_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__mmask8" varname="k1" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="__m256i" varname="b" etype="UI32"/>
+ <description>Compare packed unsigned 32-bit integers in "a" and "b" for equality, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k1[j]
+ k[j] := ( a[i+31:i] == b[i+31:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:8] := 0
+ </operation>
+ <instruction name="VPCMPUD" form="k {k}, ymm, ymm" xed="VPCMPUD_MASKmskw_MASKmskw_YMMu32_YMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cmpge_epu32_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__mmask8" varname="k1" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="__m256i" varname="b" etype="UI32"/>
+ <description>Compare packed unsigned 32-bit integers in "a" and "b" for greater-than-or-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k1[j]
+ k[j] := ( a[i+31:i] &gt;= b[i+31:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:8] := 0
+ </operation>
+ <instruction name="VPCMPUD" form="k {k}, ymm, ymm" xed="VPCMPUD_MASKmskw_MASKmskw_YMMu32_YMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cmpgt_epu32_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__mmask8" varname="k1" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="__m256i" varname="b" etype="UI32"/>
+ <description>Compare packed unsigned 32-bit integers in "a" and "b" for greater-than, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k1[j]
+ k[j] := ( a[i+31:i] &gt; b[i+31:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:8] := 0
+ </operation>
+ <instruction name="VPCMPUD" form="k {k}, ymm, ymm" xed="VPCMPUD_MASKmskw_MASKmskw_YMMu32_YMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cmple_epu32_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__mmask8" varname="k1" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="__m256i" varname="b" etype="UI32"/>
+ <description>Compare packed unsigned 32-bit integers in "a" and "b" for less-than-or-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k1[j]
+ k[j] := ( a[i+31:i] &lt;= b[i+31:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:8] := 0
+ </operation>
+ <instruction name="VPCMPUD" form="k {k}, ymm, ymm" xed="VPCMPUD_MASKmskw_MASKmskw_YMMu32_YMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cmplt_epu32_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__mmask8" varname="k1" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="__m256i" varname="b" etype="UI32"/>
+ <description>Compare packed unsigned 32-bit integers in "a" and "b" for less-than, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k1[j]
+ k[j] := ( a[i+31:i] &lt; b[i+31:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:8] := 0
+ </operation>
+ <instruction name="VPCMPUD" form="k {k}, ymm, ymm" xed="VPCMPUD_MASKmskw_MASKmskw_YMMu32_YMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cmpneq_epu32_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__mmask8" varname="k1" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="__m256i" varname="b" etype="UI32"/>
+ <description>Compare packed unsigned 32-bit integers in "a" and "b" for not-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k1[j]
+ k[j] := ( a[i+31:i] != b[i+31:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:8] := 0
+ </operation>
+ <instruction name="VPCMPUD" form="k {k}, ymm, ymm" xed="VPCMPUD_MASKmskw_MASKmskw_YMMu32_YMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cmp_epu32_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="b" etype="UI32"/>
+ <parameter type="_MM_CMPINT_ENUM" varname="imm8" etype="IMM" immtype="_MM_CMPINT"/>
+ <description>Compare packed unsigned 32-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k".</description>
+ <operation>CASE (imm8[2:0]) OF
+0: OP := _MM_CMPINT_EQ
+1: OP := _MM_CMPINT_LT
+2: OP := _MM_CMPINT_LE
+3: OP := _MM_CMPINT_FALSE
+4: OP := _MM_CMPINT_NE
+5: OP := _MM_CMPINT_NLT
+6: OP := _MM_CMPINT_NLE
+7: OP := _MM_CMPINT_TRUE
+ESAC
+FOR j := 0 to 3
+ i := j*32
+ k[j] := ( a[i+31:i] OP b[i+31:i] ) ? 1 : 0
+ENDFOR
+k[MAX:4] := 0
+ </operation>
+ <instruction name="VPCMPUD" form="k, xmm, xmm, imm8" xed="VPCMPUD_MASKmskw_MASKmskw_XMMu32_XMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cmpeq_epu32_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="b" etype="UI32"/>
+ <description>Compare packed unsigned 32-bit integers in "a" and "b" for equality, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ k[j] := ( a[i+31:i] == b[i+31:i] ) ? 1 : 0
+ENDFOR
+k[MAX:4] := 0
+ </operation>
+ <instruction name="VPCMPUD" form="k, xmm, xmm" xed="VPCMPUD_MASKmskw_MASKmskw_XMMu32_XMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cmpge_epu32_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="b" etype="UI32"/>
+ <description>Compare packed unsigned 32-bit integers in "a" and "b" for greater-than-or-equal, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ k[j] := ( a[i+31:i] &gt;= b[i+31:i] ) ? 1 : 0
+ENDFOR
+k[MAX:4] := 0
+ </operation>
+ <instruction name="VPCMPUD" form="k, xmm, xmm" xed="VPCMPUD_MASKmskw_MASKmskw_XMMu32_XMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cmpgt_epu32_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="b" etype="UI32"/>
+ <description>Compare packed unsigned 32-bit integers in "a" and "b" for greater-than, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ k[j] := ( a[i+31:i] &gt; b[i+31:i] ) ? 1 : 0
+ENDFOR
+k[MAX:4] := 0
+ </operation>
+ <instruction name="VPCMPUD" form="k, xmm, xmm" xed="VPCMPUD_MASKmskw_MASKmskw_XMMu32_XMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cmple_epu32_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="b" etype="UI32"/>
+ <description>Compare packed unsigned 32-bit integers in "a" and "b" for less-than-or-equal, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ k[j] := ( a[i+31:i] &lt;= b[i+31:i] ) ? 1 : 0
+ENDFOR
+k[MAX:4] := 0
+ </operation>
+ <instruction name="VPCMPUD" form="k, xmm, xmm" xed="VPCMPUD_MASKmskw_MASKmskw_XMMu32_XMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cmplt_epu32_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="b" etype="UI32"/>
+ <description>Compare packed unsigned 32-bit integers in "a" and "b" for less-than, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ k[j] := ( a[i+31:i] &lt; b[i+31:i] ) ? 1 : 0
+ENDFOR
+k[MAX:4] := 0
+ </operation>
+ <instruction name="VPCMPUD" form="k, xmm, xmm" xed="VPCMPUD_MASKmskw_MASKmskw_XMMu32_XMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cmpneq_epu32_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="b" etype="UI32"/>
+ <description>Compare packed unsigned 32-bit integers in "a" and "b" for not-equal, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ k[j] := ( a[i+31:i] != b[i+31:i] ) ? 1 : 0
+ENDFOR
+k[MAX:4] := 0
+ </operation>
+ <instruction name="VPCMPUD" form="k, xmm, xmm" xed="VPCMPUD_MASKmskw_MASKmskw_XMMu32_XMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cmp_epu32_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__mmask8" varname="k1" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="b" etype="UI32"/>
+ <parameter type="_MM_CMPINT_ENUM" varname="imm8" etype="IMM" immtype="_MM_CMPINT"/>
+ <description>Compare packed unsigned 32-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>CASE (imm8[2:0]) OF
+0: OP := _MM_CMPINT_EQ
+1: OP := _MM_CMPINT_LT
+2: OP := _MM_CMPINT_LE
+3: OP := _MM_CMPINT_FALSE
+4: OP := _MM_CMPINT_NE
+5: OP := _MM_CMPINT_NLT
+6: OP := _MM_CMPINT_NLE
+7: OP := _MM_CMPINT_TRUE
+ESAC
+FOR j := 0 to 3
+ i := j*32
+ IF k1[j]
+ k[j] := ( a[i+31:i] OP b[i+31:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:4] := 0
+ </operation>
+ <instruction name="VPCMPUD" form="k {k}, xmm, xmm, imm8" xed="VPCMPUD_MASKmskw_MASKmskw_XMMu32_XMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cmpeq_epu32_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__mmask8" varname="k1" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="b" etype="UI32"/>
+ <description>Compare packed unsigned 32-bit integers in "a" and "b" for equality, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k1[j]
+ k[j] := ( a[i+31:i] == b[i+31:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:4] := 0
+ </operation>
+ <instruction name="VPCMPUD" form="k {k}, xmm, xmm" xed="VPCMPUD_MASKmskw_MASKmskw_XMMu32_XMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cmpge_epu32_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__mmask8" varname="k1" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="b" etype="UI32"/>
+ <description>Compare packed unsigned 32-bit integers in "a" and "b" for greater-than-or-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k1[j]
+ k[j] := ( a[i+31:i] &gt;= b[i+31:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:4] := 0
+ </operation>
+ <instruction name="VPCMPUD" form="k {k}, xmm, xmm" xed="VPCMPUD_MASKmskw_MASKmskw_XMMu32_XMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cmpgt_epu32_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__mmask8" varname="k1" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="b" etype="UI32"/>
+ <description>Compare packed unsigned 32-bit integers in "a" and "b" for greater-than, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k1[j]
+ k[j] := ( a[i+31:i] &gt; b[i+31:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:4] := 0
+ </operation>
+ <instruction name="VPCMPUD" form="k {k}, xmm, xmm" xed="VPCMPUD_MASKmskw_MASKmskw_XMMu32_XMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cmple_epu32_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__mmask8" varname="k1" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="b" etype="UI32"/>
+ <description>Compare packed unsigned 32-bit integers in "a" and "b" for less-than-or-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k1[j]
+ k[j] := ( a[i+31:i] &lt;= b[i+31:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:4] := 0
+ </operation>
+ <instruction name="VPCMPUD" form="k {k}, xmm, xmm" xed="VPCMPUD_MASKmskw_MASKmskw_XMMu32_XMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cmplt_epu32_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__mmask8" varname="k1" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="b" etype="UI32"/>
+ <description>Compare packed unsigned 32-bit integers in "a" and "b" for less-than, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k1[j]
+ k[j] := ( a[i+31:i] &lt; b[i+31:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:4] := 0
+ </operation>
+ <instruction name="VPCMPUD" form="k {k}, xmm, xmm" xed="VPCMPUD_MASKmskw_MASKmskw_XMMu32_XMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cmpneq_epu32_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__mmask8" varname="k1" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="b" etype="UI32"/>
+ <description>Compare packed unsigned 32-bit integers in "a" and "b" for not-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k1[j]
+ k[j] := ( a[i+31:i] != b[i+31:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:4] := 0
+ </operation>
+ <instruction name="VPCMPUD" form="k {k}, xmm, xmm" xed="VPCMPUD_MASKmskw_MASKmskw_XMMu32_XMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_cmp_epu64_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="__m256i" varname="b" etype="UI64"/>
+ <parameter type="_MM_CMPINT_ENUM" varname="imm8" etype="IMM" immtype="_MM_CMPINT"/>
+ <description>Compare packed unsigned 64-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k".</description>
+ <operation>CASE (imm8[2:0]) OF
+0: OP := _MM_CMPINT_EQ
+1: OP := _MM_CMPINT_LT
+2: OP := _MM_CMPINT_LE
+3: OP := _MM_CMPINT_FALSE
+4: OP := _MM_CMPINT_NE
+5: OP := _MM_CMPINT_NLT
+6: OP := _MM_CMPINT_NLE
+7: OP := _MM_CMPINT_TRUE
+ESAC
+FOR j := 0 to 3
+ i := j*64
+ k[j] := ( a[i+63:i] OP b[i+63:i] ) ? 1 : 0
+ENDFOR
+k[MAX:4] := 0
+ </operation>
+ <instruction name="VPCMPUQ" form="k, ymm, ymm, imm8" xed="VPCMPUQ_MASKmskw_MASKmskw_YMMu64_YMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_cmpeq_epu64_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="__m256i" varname="b" etype="UI64"/>
+ <description>Compare packed unsigned 64-bit integers in "a" and "b" for equality, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ k[j] := ( a[i+63:i] == b[i+63:i] ) ? 1 : 0
+ENDFOR
+k[MAX:4] := 0
+ </operation>
+ <instruction name="VPCMPUQ" form="k, ymm, ymm" xed="VPCMPUQ_MASKmskw_MASKmskw_YMMu64_YMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_cmpge_epu64_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="__m256i" varname="b" etype="UI64"/>
+ <description>Compare packed unsigned 64-bit integers in "a" and "b" for greater-than-or-equal, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ k[j] := ( a[i+63:i] &gt;= b[i+63:i] ) ? 1 : 0
+ENDFOR
+k[MAX:4] := 0
+ </operation>
+ <instruction name="VPCMPUQ" form="k, ymm, ymm" xed="VPCMPUQ_MASKmskw_MASKmskw_YMMu64_YMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_cmpgt_epu64_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="__m256i" varname="b" etype="UI64"/>
+ <description>Compare packed unsigned 64-bit integers in "a" and "b" for greater-than, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ k[j] := ( a[i+63:i] &gt; b[i+63:i] ) ? 1 : 0
+ENDFOR
+k[MAX:4] := 0
+ </operation>
+ <instruction name="VPCMPUQ" form="k, ymm, ymm" xed="VPCMPUQ_MASKmskw_MASKmskw_YMMu64_YMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_cmple_epu64_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="__m256i" varname="b" etype="UI64"/>
+ <description>Compare packed unsigned 64-bit integers in "a" and "b" for less-than-or-equal, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ k[j] := ( a[i+63:i] &lt;= b[i+63:i] ) ? 1 : 0
+ENDFOR
+k[MAX:4] := 0
+ </operation>
+ <instruction name="VPCMPUQ" form="k, ymm, ymm" xed="VPCMPUQ_MASKmskw_MASKmskw_YMMu64_YMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_cmplt_epu64_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="__m256i" varname="b" etype="UI64"/>
+ <description>Compare packed unsigned 64-bit integers in "a" and "b" for less-than, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ k[j] := ( a[i+63:i] &lt; b[i+63:i] ) ? 1 : 0
+ENDFOR
+k[MAX:4] := 0
+ </operation>
+ <instruction name="VPCMPUQ" form="k, ymm, ymm" xed="VPCMPUQ_MASKmskw_MASKmskw_YMMu64_YMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_cmpneq_epu64_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="__m256i" varname="b" etype="UI64"/>
+ <description>Compare packed unsigned 64-bit integers in "a" and "b" for not-equal, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ k[j] := ( a[i+63:i] != b[i+63:i] ) ? 1 : 0
+ENDFOR
+k[MAX:4] := 0
+ </operation>
+ <instruction name="VPCMPUQ" form="k, ymm, ymm" xed="VPCMPUQ_MASKmskw_MASKmskw_YMMu64_YMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cmp_epu64_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__mmask8" varname="k1" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="__m256i" varname="b" etype="UI64"/>
+ <parameter type="_MM_CMPINT_ENUM" varname="imm8" etype="IMM" immtype="_MM_CMPINT"/>
+ <description>Compare packed unsigned 64-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>CASE (imm8[2:0]) OF
+0: OP := _MM_CMPINT_EQ
+1: OP := _MM_CMPINT_LT
+2: OP := _MM_CMPINT_LE
+3: OP := _MM_CMPINT_FALSE
+4: OP := _MM_CMPINT_NE
+5: OP := _MM_CMPINT_NLT
+6: OP := _MM_CMPINT_NLE
+7: OP := _MM_CMPINT_TRUE
+ESAC
+FOR j := 0 to 3
+ i := j*64
+ IF k1[j]
+ k[j] := ( a[i+63:i] OP b[i+63:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:4] := 0
+ </operation>
+ <instruction name="VPCMPUQ" form="k {k}, ymm, ymm, imm8" xed="VPCMPUQ_MASKmskw_MASKmskw_YMMu64_YMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cmpeq_epu64_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__mmask8" varname="k1" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="__m256i" varname="b" etype="UI64"/>
+ <description>Compare packed unsigned 64-bit integers in "a" and "b" for equality, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k1[j]
+ k[j] := ( a[i+63:i] == b[i+63:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:4] := 0
+ </operation>
+ <instruction name="VPCMPUQ" form="k {k}, ymm, ymm" xed="VPCMPUQ_MASKmskw_MASKmskw_YMMu64_YMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cmpge_epu64_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__mmask8" varname="k1" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="__m256i" varname="b" etype="UI64"/>
+ <description>Compare packed unsigned 64-bit integers in "a" and "b" for greater-than-or-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k1[j]
+ k[j] := ( a[i+63:i] &gt;= b[i+63:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:4] := 0
+ </operation>
+ <instruction name="VPCMPUQ" form="k {k}, ymm, ymm" xed="VPCMPUQ_MASKmskw_MASKmskw_YMMu64_YMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cmpgt_epu64_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__mmask8" varname="k1" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="__m256i" varname="b" etype="UI64"/>
+ <description>Compare packed unsigned 64-bit integers in "a" and "b" for greater-than, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k1[j]
+ k[j] := ( a[i+63:i] &gt; b[i+63:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:4] := 0
+ </operation>
+ <instruction name="VPCMPUQ" form="k {k}, ymm, ymm" xed="VPCMPUQ_MASKmskw_MASKmskw_YMMu64_YMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cmple_epu64_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__mmask8" varname="k1" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="__m256i" varname="b" etype="UI64"/>
+ <description>Compare packed unsigned 64-bit integers in "a" and "b" for less-than-or-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k1[j]
+ k[j] := ( a[i+63:i] &lt;= b[i+63:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:4] := 0
+ </operation>
+ <instruction name="VPCMPUQ" form="k {k}, ymm, ymm" xed="VPCMPUQ_MASKmskw_MASKmskw_YMMu64_YMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cmplt_epu64_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__mmask8" varname="k1" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="__m256i" varname="b" etype="UI64"/>
+ <description>Compare packed unsigned 64-bit integers in "a" and "b" for less-than, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k1[j]
+ k[j] := ( a[i+63:i] &lt; b[i+63:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:4] := 0
+ </operation>
+ <instruction name="VPCMPUQ" form="k {k}, ymm, ymm" xed="VPCMPUQ_MASKmskw_MASKmskw_YMMu64_YMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cmpneq_epu64_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__mmask8" varname="k1" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="__m256i" varname="b" etype="UI64"/>
+ <description>Compare packed unsigned 64-bit integers in "a" and "b" for not-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k1[j]
+ k[j] := ( a[i+63:i] != b[i+63:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:4] := 0
+ </operation>
+ <instruction name="VPCMPUQ" form="k {k}, ymm, ymm" xed="VPCMPUQ_MASKmskw_MASKmskw_YMMu64_YMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cmp_epu64_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <parameter type="__m128i" varname="b" etype="UI64"/>
+ <parameter type="_MM_CMPINT_ENUM" varname="imm8" etype="IMM" immtype="_MM_CMPINT"/>
+ <description>Compare packed unsigned 64-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k".</description>
+ <operation>CASE (imm8[2:0]) OF
+0: OP := _MM_CMPINT_EQ
+1: OP := _MM_CMPINT_LT
+2: OP := _MM_CMPINT_LE
+3: OP := _MM_CMPINT_FALSE
+4: OP := _MM_CMPINT_NE
+5: OP := _MM_CMPINT_NLT
+6: OP := _MM_CMPINT_NLE
+7: OP := _MM_CMPINT_TRUE
+ESAC
+FOR j := 0 to 1
+ i := j*64
+ k[j] := ( a[i+63:i] OP b[i+63:i] ) ? 1 : 0
+ENDFOR
+k[MAX:2] := 0
+ </operation>
+ <instruction name="VPCMPUQ" form="k, xmm, xmm, imm8" xed="VPCMPUQ_MASKmskw_MASKmskw_XMMu64_XMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cmpeq_epu64_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <parameter type="__m128i" varname="b" etype="UI64"/>
+ <description>Compare packed unsigned 64-bit integers in "a" and "b" for equality, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ k[j] := ( a[i+63:i] == b[i+63:i] ) ? 1 : 0
+ENDFOR
+k[MAX:2] := 0
+ </operation>
+ <instruction name="VPCMPUQ" form="k, xmm, xmm" xed="VPCMPUQ_MASKmskw_MASKmskw_XMMu64_XMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cmpge_epu64_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <parameter type="__m128i" varname="b" etype="UI64"/>
+ <description>Compare packed unsigned 64-bit integers in "a" and "b" for greater-than-or-equal, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ k[j] := ( a[i+63:i] &gt;= b[i+63:i] ) ? 1 : 0
+ENDFOR
+k[MAX:2] := 0
+ </operation>
+ <instruction name="VPCMPUQ" form="k, xmm, xmm" xed="VPCMPUQ_MASKmskw_MASKmskw_XMMu64_XMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cmpgt_epu64_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <parameter type="__m128i" varname="b" etype="UI64"/>
+ <description>Compare packed unsigned 64-bit integers in "a" and "b" for greater-than, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ k[j] := ( a[i+63:i] &gt; b[i+63:i] ) ? 1 : 0
+ENDFOR
+k[MAX:2] := 0
+ </operation>
+ <instruction name="VPCMPUQ" form="k, xmm, xmm" xed="VPCMPUQ_MASKmskw_MASKmskw_XMMu64_XMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cmple_epu64_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <parameter type="__m128i" varname="b" etype="UI64"/>
+ <description>Compare packed unsigned 64-bit integers in "a" and "b" for less-than-or-equal, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ k[j] := ( a[i+63:i] &lt;= b[i+63:i] ) ? 1 : 0
+ENDFOR
+k[MAX:2] := 0
+ </operation>
+ <instruction name="VPCMPUQ" form="k, xmm, xmm" xed="VPCMPUQ_MASKmskw_MASKmskw_XMMu64_XMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cmplt_epu64_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <parameter type="__m128i" varname="b" etype="UI64"/>
+ <description>Compare packed unsigned 64-bit integers in "a" and "b" for less-than, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ k[j] := ( a[i+63:i] &lt; b[i+63:i] ) ? 1 : 0
+ENDFOR
+k[MAX:2] := 0
+ </operation>
+ <instruction name="VPCMPUQ" form="k, xmm, xmm" xed="VPCMPUQ_MASKmskw_MASKmskw_XMMu64_XMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cmpneq_epu64_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <parameter type="__m128i" varname="b" etype="UI64"/>
+ <description>Compare packed unsigned 64-bit integers in "a" and "b" for not-equal, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ k[j] := ( a[i+63:i] != b[i+63:i] ) ? 1 : 0
+ENDFOR
+k[MAX:2] := 0
+ </operation>
+ <instruction name="VPCMPUQ" form="k, xmm, xmm" xed="VPCMPUQ_MASKmskw_MASKmskw_XMMu64_XMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cmp_epu64_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__mmask8" varname="k1" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <parameter type="__m128i" varname="b" etype="UI64"/>
+ <parameter type="_MM_CMPINT_ENUM" varname="imm8" etype="IMM" immtype="_MM_CMPINT"/>
+ <description>Compare packed unsigned 64-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>CASE (imm8[2:0]) OF
+0: OP := _MM_CMPINT_EQ
+1: OP := _MM_CMPINT_LT
+2: OP := _MM_CMPINT_LE
+3: OP := _MM_CMPINT_FALSE
+4: OP := _MM_CMPINT_NE
+5: OP := _MM_CMPINT_NLT
+6: OP := _MM_CMPINT_NLE
+7: OP := _MM_CMPINT_TRUE
+ESAC
+FOR j := 0 to 1
+ i := j*64
+ IF k1[j]
+ k[j] := ( a[i+63:i] OP b[i+63:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:2] := 0
+ </operation>
+ <instruction name="VPCMPUQ" form="k {k}, xmm, xmm, imm8" xed="VPCMPUQ_MASKmskw_MASKmskw_XMMu64_XMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cmpeq_epu64_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__mmask8" varname="k1" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <parameter type="__m128i" varname="b" etype="UI64"/>
+ <description>Compare packed unsigned 64-bit integers in "a" and "b" for equality, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k1[j]
+ k[j] := ( a[i+63:i] == b[i+63:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:2] := 0
+ </operation>
+ <instruction name="VPCMPUQ" form="k {k}, xmm, xmm" xed="VPCMPUQ_MASKmskw_MASKmskw_XMMu64_XMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cmpge_epu64_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__mmask8" varname="k1" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <parameter type="__m128i" varname="b" etype="UI64"/>
+ <description>Compare packed unsigned 64-bit integers in "a" and "b" for greater-than-or-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k1[j]
+ k[j] := ( a[i+63:i] &gt;= b[i+63:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:2] := 0
+ </operation>
+ <instruction name="VPCMPUQ" form="k {k}, xmm, xmm" xed="VPCMPUQ_MASKmskw_MASKmskw_XMMu64_XMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cmpgt_epu64_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__mmask8" varname="k1" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <parameter type="__m128i" varname="b" etype="UI64"/>
+ <description>Compare packed unsigned 64-bit integers in "a" and "b" for greater-than, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k1[j]
+ k[j] := ( a[i+63:i] &gt; b[i+63:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:2] := 0
+ </operation>
+ <instruction name="VPCMPUQ" form="k {k}, xmm, xmm" xed="VPCMPUQ_MASKmskw_MASKmskw_XMMu64_XMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cmple_epu64_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__mmask8" varname="k1" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <parameter type="__m128i" varname="b" etype="UI64"/>
+ <description>Compare packed unsigned 64-bit integers in "a" and "b" for less-than-or-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k1[j]
+ k[j] := ( a[i+63:i] &lt;= b[i+63:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:2] := 0
+ </operation>
+ <instruction name="VPCMPUQ" form="k {k}, xmm, xmm" xed="VPCMPUQ_MASKmskw_MASKmskw_XMMu64_XMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cmplt_epu64_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__mmask8" varname="k1" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <parameter type="__m128i" varname="b" etype="UI64"/>
+ <description>Compare packed unsigned 64-bit integers in "a" and "b" for less-than, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k1[j]
+ k[j] := ( a[i+63:i] &lt; b[i+63:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:2] := 0
+ </operation>
+ <instruction name="VPCMPUQ" form="k {k}, xmm, xmm" xed="VPCMPUQ_MASKmskw_MASKmskw_XMMu64_XMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cmpneq_epu64_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__mmask8" varname="k1" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <parameter type="__m128i" varname="b" etype="UI64"/>
+ <description>Compare packed unsigned 64-bit integers in "a" and "b" for not-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k1[j]
+ k[j] := ( a[i+63:i] != b[i+63:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:2] := 0
+ </operation>
+ <instruction name="VPCMPUQ" form="k {k}, xmm, xmm" xed="VPCMPUQ_MASKmskw_MASKmskw_XMMu64_XMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_compress_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <description>Contiguously store the active 32-bit integers in "a" (those with their respective bit set in writemask "k") to "dst", and pass through the remaining elements from "src".</description>
+ <operation>
+size := 32
+m := 0
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[m+size-1:m] := a[i+31:i]
+ m := m + size
+ FI
+ENDFOR
+dst[255:m] := src[255:m]
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPCOMPRESSD" form="ymm {k}, ymm" xed="VPCOMPRESSD_YMMu32_MASKmskw_YMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_compressstoreu_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="base_addr" etype="UI32" memwidth="256"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <description>Contiguously store the active 32-bit integers in "a" (those with their respective bit set in writemask "k") to unaligned memory at "base_addr".</description>
+ <operation>
+size := 32
+m := base_addr
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ MEM[m+size-1:m] := a[i+31:i]
+ m := m + size
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VPCOMPRESSD" form="m256 {k}, ymm" xed="VPCOMPRESSD_MEMu32_MASKmskw_YMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_compress_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <description>Contiguously store the active 32-bit integers in "a" (those with their respective bit set in zeromask "k") to "dst", and set the remaining elements to zero.</description>
+ <operation>
+size := 32
+m := 0
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[m+size-1:m] := a[i+31:i]
+ m := m + size
+ FI
+ENDFOR
+dst[255:m] := 0
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPCOMPRESSD" form="ymm {z}, ymm" xed="VPCOMPRESSD_YMMu32_MASKmskw_YMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_compress_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <description>Contiguously store the active 32-bit integers in "a" (those with their respective bit set in writemask "k") to "dst", and pass through the remaining elements from "src".</description>
+ <operation>
+size := 32
+m := 0
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[m+size-1:m] := a[i+31:i]
+ m := m + size
+ FI
+ENDFOR
+dst[127:m] := src[127:m]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPCOMPRESSD" form="xmm {k}, xmm" xed="VPCOMPRESSD_XMMu32_MASKmskw_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_compressstoreu_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="base_addr" etype="UI32" memwidth="128"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <description>Contiguously store the active 32-bit integers in "a" (those with their respective bit set in writemask "k") to unaligned memory at "base_addr".</description>
+ <operation>
+size := 32
+m := base_addr
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ MEM[m+size-1:m] := a[i+31:i]
+ m := m + size
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VPCOMPRESSD" form="m128 {k}, xmm" xed="VPCOMPRESSD_MEMu32_MASKmskw_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_compress_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <description>Contiguously store the active 32-bit integers in "a" (those with their respective bit set in zeromask "k") to "dst", and set the remaining elements to zero.</description>
+ <operation>
+size := 32
+m := 0
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[m+size-1:m] := a[i+31:i]
+ m := m + size
+ FI
+ENDFOR
+dst[127:m] := 0
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPCOMPRESSD" form="xmm {z}, xmm" xed="VPCOMPRESSD_XMMu32_MASKmskw_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_compress_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m256i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <description>Contiguously store the active 64-bit integers in "a" (those with their respective bit set in writemask "k") to "dst", and pass through the remaining elements from "src".</description>
+ <operation>
+size := 64
+m := 0
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[m+size-1:m] := a[i+63:i]
+ m := m + size
+ FI
+ENDFOR
+dst[255:m] := src[255:m]
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPCOMPRESSQ" form="ymm {k}, ymm" xed="VPCOMPRESSQ_YMMu64_MASKmskw_YMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_compressstoreu_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="base_addr" etype="UI64" memwidth="256"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <description>Contiguously store the active 64-bit integers in "a" (those with their respective bit set in writemask "k") to unaligned memory at "base_addr".</description>
+ <operation>
+size := 64
+m := base_addr
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ MEM[m+size-1:m] := a[i+63:i]
+ m := m + size
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VPCOMPRESSQ" form="m256 {k}, ymm" xed="VPCOMPRESSQ_MEMu64_MASKmskw_YMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_compress_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <description>Contiguously store the active 64-bit integers in "a" (those with their respective bit set in zeromask "k") to "dst", and set the remaining elements to zero.</description>
+ <operation>
+size := 64
+m := 0
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[m+size-1:m] := a[i+63:i]
+ m := m + size
+ FI
+ENDFOR
+dst[255:m] := 0
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPCOMPRESSQ" form="ymm {z}, ymm" xed="VPCOMPRESSQ_YMMu64_MASKmskw_YMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_compress_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m128i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <description>Contiguously store the active 64-bit integers in "a" (those with their respective bit set in writemask "k") to "dst", and pass through the remaining elements from "src".</description>
+ <operation>
+size := 64
+m := 0
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[m+size-1:m] := a[i+63:i]
+ m := m + size
+ FI
+ENDFOR
+dst[127:m] := src[127:m]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPCOMPRESSQ" form="xmm {k}, xmm" xed="VPCOMPRESSQ_XMMu64_MASKmskw_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_compressstoreu_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="base_addr" etype="UI64" memwidth="128"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <description>Contiguously store the active 64-bit integers in "a" (those with their respective bit set in writemask "k") to unaligned memory at "base_addr".</description>
+ <operation>
+size := 64
+m := base_addr
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ MEM[m+size-1:m] := a[i+63:i]
+ m := m + size
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VPCOMPRESSQ" form="m128 {k}, xmm" xed="VPCOMPRESSQ_MEMu64_MASKmskw_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_compress_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <description>Contiguously store the active 64-bit integers in "a" (those with their respective bit set in zeromask "k") to "dst", and set the remaining elements to zero.</description>
+ <operation>
+size := 64
+m := 0
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[m+size-1:m] := a[i+63:i]
+ m := m + size
+ FI
+ENDFOR
+dst[127:m] := 0
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPCOMPRESSQ" form="xmm {z}, xmm" xed="VPCOMPRESSQ_XMMu64_MASKmskw_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_permutexvar_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="idx" etype="UI32"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <description>Shuffle 32-bit integers in "a" across lanes using the corresponding index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ id := idx[i+2:i]*32
+ IF k[j]
+ dst[i+31:i] := a[id+31:id]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPERMD" form="ymm {k}, ymm, ymm" xed="VPERMD_YMMu32_MASKmskw_YMMu32_YMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_permutexvar_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="idx" etype="UI32"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <description>Shuffle 32-bit integers in "a" across lanes using the corresponding index in "idx", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ id := idx[i+2:i]*32
+ IF k[j]
+ dst[i+31:i] := a[id+31:id]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPERMD" form="ymm {z}, ymm, ymm" xed="VPERMD_YMMu32_MASKmskw_YMMu32_YMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_permutexvar_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="idx" etype="UI32"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <description>Shuffle 32-bit integers in "a" across lanes using the corresponding index in "idx", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ id := idx[i+2:i]*32
+ dst[i+31:i] := a[id+31:id]
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPERMD" form="ymm, ymm, ymm" xed="VPERMD_YMMu32_MASKmskw_YMMu32_YMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask2_permutex2var_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="__m256i" varname="idx" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="b" etype="UI32"/>
+ <description>Shuffle 32-bit integers in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "idx" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ off := idx[i+2:i]*32
+ IF k[j]
+ dst[i+31:i] := idx[i+3] ? b[off+31:off] : a[off+31:off]
+ ELSE
+ dst[i+31:i] := idx[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPERMI2D" form="ymm {k}, ymm, ymm" xed="VPERMI2D_YMMu32_MASKmskw_YMMu32_YMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_permutex2var_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="idx" etype="UI32"/>
+ <parameter type="__m256i" varname="b" etype="UI32"/>
+ <description>Shuffle 32-bit integers in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ off := idx[i+2:i]*32
+ IF k[j]
+ dst[i+31:i] := idx[i+3] ? b[off+31:off] : a[off+31:off]
+ ELSE
+ dst[i+31:i] := a[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPERMT2D" form="ymm {k}, ymm, ymm" xed="VPERMT2D_YMMu32_MASKmskw_YMMu32_YMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_permutex2var_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="__m256i" varname="idx" etype="UI32"/>
+ <parameter type="__m256i" varname="b" etype="UI32"/>
+ <description>Shuffle 32-bit integers in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ off := idx[i+2:i]*32
+ IF k[j]
+ dst[i+31:i] := (idx[i+3]) ? b[off+31:off] : a[off+31:off]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPERMI2D" form="ymm {z}, ymm, ymm" xed="VPERMI2D_YMMu32_MASKmskw_YMMu32_YMMu32_AVX512"/>
+ <instruction name="VPERMT2D" form="ymm {z}, ymm, ymm" xed="VPERMT2D_YMMu32_MASKmskw_YMMu32_YMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_permutex2var_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="__m256i" varname="idx" etype="UI32"/>
+ <parameter type="__m256i" varname="b" etype="UI32"/>
+ <description>Shuffle 32-bit integers in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ off := idx[i+2:i]*32
+ dst[i+31:i] := idx[i+3] ? b[off+31:off] : a[off+31:off]
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPERMI2D" form="ymm, ymm, ymm" xed="VPERMI2D_YMMu32_MASKmskw_YMMu32_YMMu32_AVX512"/>
+ <instruction name="VPERMT2D" form="ymm, ymm, ymm" xed="VPERMT2D_YMMu32_MASKmskw_YMMu32_YMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask2_permutex2var_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="idx" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="b" etype="UI32"/>
+ <description>Shuffle 32-bit integers in "a" and "b" using the corresponding selector and index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "idx" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ off := idx[i+1:i]*32
+ IF k[j]
+ dst[i+31:i] := idx[i+2] ? b[off+31:off] : a[off+31:off]
+ ELSE
+ dst[i+31:i] := idx[i+31:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPERMI2D" form="xmm {k}, xmm, xmm" xed="VPERMI2D_XMMu32_MASKmskw_XMMu32_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_permutex2var_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="idx" etype="UI32"/>
+ <parameter type="__m128i" varname="b" etype="UI32"/>
+ <description>Shuffle 32-bit integers in "a" and "b" using the corresponding selector and index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ off := idx[i+1:i]*32
+ IF k[j]
+ dst[i+31:i] := idx[i+2] ? b[off+31:off] : a[off+31:off]
+ ELSE
+ dst[i+31:i] := a[i+31:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPERMT2D" form="xmm {k}, xmm, xmm" xed="VPERMT2D_XMMu32_MASKmskw_XMMu32_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_permutex2var_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="idx" etype="UI32"/>
+ <parameter type="__m128i" varname="b" etype="UI32"/>
+ <description>Shuffle 32-bit integers in "a" and "b" using the corresponding selector and index in "idx", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ off := idx[i+1:i]*32
+ IF k[j]
+ dst[i+31:i] := (idx[i+2]) ? b[off+31:off] : a[off+31:off]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPERMI2D" form="xmm {z}, xmm, xmm" xed="VPERMI2D_XMMu32_MASKmskw_XMMu32_XMMu32_AVX512"/>
+ <instruction name="VPERMT2D" form="xmm {z}, xmm, xmm" xed="VPERMT2D_XMMu32_MASKmskw_XMMu32_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_permutex2var_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="idx" etype="UI32"/>
+ <parameter type="__m128i" varname="b" etype="UI32"/>
+ <description>Shuffle 32-bit integers in "a" and "b" using the corresponding selector and index in "idx", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ off := idx[i+1:i]*32
+ dst[i+31:i] := idx[i+2] ? b[off+31:off] : a[off+31:off]
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPERMI2D" form="xmm, xmm, xmm" xed="VPERMI2D_XMMu32_MASKmskw_XMMu32_XMMu32_AVX512"/>
+ <instruction name="VPERMT2D" form="xmm, xmm, xmm" xed="VPERMT2D_XMMu32_MASKmskw_XMMu32_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask2_permutex2var_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <parameter type="__m256i" varname="idx" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256d" varname="b" etype="FP64"/>
+ <description>Shuffle double-precision (64-bit) floating-point elements in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "idx" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ off := idx[i+1:i]*64
+ IF k[j]
+ dst[i+63:i] := idx[i+2] ? b[off+63:off] : a[off+63:off]
+ ELSE
+ dst[i+63:i] := idx[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPERMI2PD" form="ymm {k}, ymm, ymm" xed="VPERMI2PD_YMMf64_MASKmskw_YMMf64_YMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_permutex2var_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="idx" etype="UI64"/>
+ <parameter type="__m256d" varname="b" etype="FP64"/>
+ <description>Shuffle double-precision (64-bit) floating-point elements in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ off := idx[i+1:i]*64
+ IF k[j]
+ dst[i+63:i] := idx[i+2] ? b[off+63:off] : a[off+63:off]
+ ELSE
+ dst[i+63:i] := a[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPERMT2PD" form="ymm {k}, ymm, ymm" xed="VPERMT2PD_YMMf64_MASKmskw_YMMf64_YMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_permutex2var_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <parameter type="__m256i" varname="idx" etype="UI64"/>
+ <parameter type="__m256d" varname="b" etype="FP64"/>
+ <description>Shuffle double-precision (64-bit) floating-point elements in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ off := idx[i+1:i]*64
+ IF k[j]
+ dst[i+63:i] := (idx[i+2]) ? b[off+63:off] : a[off+63:off]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPERMI2PD" form="ymm {z}, ymm, ymm" xed="VPERMI2PD_YMMf64_MASKmskw_YMMf64_YMMf64_AVX512"/>
+ <instruction name="VPERMT2PD" form="ymm {z}, ymm, ymm" xed="VPERMT2PD_YMMf64_MASKmskw_YMMf64_YMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_permutex2var_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <parameter type="__m256i" varname="idx" etype="UI64"/>
+ <parameter type="__m256d" varname="b" etype="FP64"/>
+ <description>Shuffle double-precision (64-bit) floating-point elements in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ off := idx[i+1:i]*64
+ dst[i+63:i] := idx[i+2] ? b[off+63:off] : a[off+63:off]
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPERMI2PD" form="ymm, ymm, ymm" xed="VPERMI2PD_YMMf64_MASKmskw_YMMf64_YMMf64_AVX512"/>
+ <instruction name="VPERMT2PD" form="ymm, ymm, ymm" xed="VPERMT2PD_YMMf64_MASKmskw_YMMf64_YMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask2_permutex2var_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128i" varname="idx" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Shuffle double-precision (64-bit) floating-point elements in "a" and "b" using the corresponding selector and index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "idx" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ off := idx[i]*64
+ IF k[j]
+ dst[i+63:i] := idx[i+1] ? b[off+63:off] : a[off+63:off]
+ ELSE
+ dst[i+63:i] := idx[i+63:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPERMI2PD" form="xmm {k}, xmm, xmm" xed="VPERMI2PD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_permutex2var_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="idx" etype="UI64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Shuffle double-precision (64-bit) floating-point elements in "a" and "b" using the corresponding selector and index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ off := idx[i]*64
+ IF k[j]
+ dst[i+63:i] := idx[i+1] ? b[off+63:off] : a[off+63:off]
+ ELSE
+ dst[i+63:i] := a[i+63:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPERMT2PD" form="xmm {k}, xmm, xmm" xed="VPERMT2PD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_permutex2var_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128i" varname="idx" etype="UI64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Shuffle double-precision (64-bit) floating-point elements in "a" and "b" using the corresponding selector and index in "idx", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ off := idx[i]*64
+ IF k[j]
+ dst[i+63:i] := (idx[i+1]) ? b[off+63:off] : a[off+63:off]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPERMI2PD" form="xmm {z}, xmm, xmm" xed="VPERMI2PD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <instruction name="VPERMT2PD" form="xmm {z}, xmm, xmm" xed="VPERMT2PD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_permutex2var_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128i" varname="idx" etype="UI64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Shuffle double-precision (64-bit) floating-point elements in "a" and "b" using the corresponding selector and index in "idx", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ off := idx[i]*64
+ dst[i+63:i] := idx[i+1] ? b[off+63:off] : a[off+63:off]
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPERMI2PD" form="xmm, xmm, xmm" xed="VPERMI2PD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <instruction name="VPERMT2PD" form="xmm, xmm, xmm" xed="VPERMT2PD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask2_permutex2var_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="__m256i" varname="idx" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256" varname="b" etype="FP32"/>
+ <description>Shuffle single-precision (32-bit) floating-point elements in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "idx" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ off := idx[i+2:i]*32
+ IF k[j]
+ dst[i+31:i] := idx[i+3] ? b[off+31:off] : a[off+31:off]
+ ELSE
+ dst[i+31:i] := idx[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPERMI2PS" form="ymm {k}, ymm, ymm" xed="VPERMI2PS_YMMf32_MASKmskw_YMMf32_YMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_permutex2var_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="idx" etype="UI32"/>
+ <parameter type="__m256" varname="b" etype="FP32"/>
+ <description>Shuffle single-precision (32-bit) floating-point elements in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ off := idx[i+2:i]*32
+ IF k[j]
+ dst[i+31:i] := idx[i+3] ? b[off+31:off] : a[off+31:off]
+ ELSE
+ dst[i+31:i] := a[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPERMT2PS" form="ymm {k}, ymm, ymm" xed="VPERMT2PS_YMMf32_MASKmskw_YMMf32_YMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_permutex2var_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="__m256i" varname="idx" etype="UI32"/>
+ <parameter type="__m256" varname="b" etype="FP32"/>
+ <description>Shuffle single-precision (32-bit) floating-point elements in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ off := idx[i+2:i]*32
+ IF k[j]
+ dst[i+31:i] := (idx[i+3]) ? b[off+31:off] : a[off+31:off]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPERMI2PS" form="ymm {z}, ymm, ymm" xed="VPERMI2PS_YMMf32_MASKmskw_YMMf32_YMMf32_AVX512"/>
+ <instruction name="VPERMT2PS" form="ymm {z}, ymm, ymm" xed="VPERMT2PS_YMMf32_MASKmskw_YMMf32_YMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_permutex2var_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="__m256i" varname="idx" etype="UI32"/>
+ <parameter type="__m256" varname="b" etype="FP32"/>
+ <description>Shuffle single-precision (32-bit) floating-point elements in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ off := idx[i+2:i]*32
+ dst[i+31:i] := idx[i+3] ? b[off+31:off] : a[off+31:off]
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPERMI2PS" form="ymm, ymm, ymm" xed="VPERMI2PS_YMMf32_MASKmskw_YMMf32_YMMf32_AVX512"/>
+ <instruction name="VPERMT2PS" form="ymm, ymm, ymm" xed="VPERMT2PS_YMMf32_MASKmskw_YMMf32_YMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask2_permutex2var_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128i" varname="idx" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Shuffle single-precision (32-bit) floating-point elements in "a" and "b" using the corresponding selector and index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "idx" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ off := idx[i+1:i]*32
+ IF k[j]
+ dst[i+31:i] := idx[i+2] ? b[off+31:off] : a[off+31:off]
+ ELSE
+ dst[i+31:i] := idx[i+31:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPERMI2PS" form="xmm {k}, xmm, xmm" xed="VPERMI2PS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_permutex2var_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="idx" etype="UI32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Shuffle single-precision (32-bit) floating-point elements in "a" and "b" using the corresponding selector and index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ off := idx[i+1:i]*32
+ IF k[j]
+ dst[i+31:i] := idx[i+2] ? b[off+31:off] : a[off+31:off]
+ ELSE
+ dst[i+31:i] := a[i+31:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPERMT2PS" form="xmm {k}, xmm, xmm" xed="VPERMT2PS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_permutex2var_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128i" varname="idx" etype="UI32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Shuffle single-precision (32-bit) floating-point elements in "a" and "b" using the corresponding selector and index in "idx", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ off := idx[i+1:i]*32
+ IF k[j]
+ dst[i+31:i] := (idx[i+2]) ? b[off+31:off] : a[off+31:off]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPERMI2PS" form="xmm {z}, xmm, xmm" xed="VPERMI2PS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <instruction name="VPERMT2PS" form="xmm {z}, xmm, xmm" xed="VPERMT2PS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_permutex2var_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128i" varname="idx" etype="UI32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Shuffle single-precision (32-bit) floating-point elements in "a" and "b" using the corresponding selector and index in "idx", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ off := idx[i+1:i]*32
+ dst[i+31:i] := idx[i+2] ? b[off+31:off] : a[off+31:off]
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPERMI2PS" form="xmm, xmm, xmm" xed="VPERMI2PS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <instruction name="VPERMT2PS" form="xmm, xmm, xmm" xed="VPERMT2PS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask2_permutex2var_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="__m256i" varname="idx" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="b" etype="UI64"/>
+ <description>Shuffle 64-bit integers in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "idx" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ off := idx[i+1:i]*64
+ IF k[j]
+ dst[i+63:i] := idx[i+2] ? b[off+63:off] : a[off+63:off]
+ ELSE
+ dst[i+63:i] := idx[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPERMI2Q" form="ymm {k}, ymm, ymm" xed="VPERMI2Q_YMMu64_MASKmskw_YMMu64_YMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_permutex2var_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="idx" etype="UI64"/>
+ <parameter type="__m256i" varname="b" etype="UI64"/>
+ <description>Shuffle 64-bit integers in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ off := idx[i+1:i]*64
+ IF k[j]
+ dst[i+63:i] := idx[i+2] ? b[off+63:off] : a[off+63:off]
+ ELSE
+ dst[i+63:i] := a[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPERMT2Q" form="ymm {k}, ymm, ymm" xed="VPERMT2Q_YMMu64_MASKmskw_YMMu64_YMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_permutex2var_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="__m256i" varname="idx" etype="UI64"/>
+ <parameter type="__m256i" varname="b" etype="UI64"/>
+ <description>Shuffle 64-bit integers in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ off := idx[i+1:i]*64
+ IF k[j]
+ dst[i+63:i] := (idx[i+2]) ? b[off+63:off] : a[off+63:off]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPERMI2Q" form="ymm {z}, ymm, ymm" xed="VPERMI2Q_YMMu64_MASKmskw_YMMu64_YMMu64_AVX512"/>
+ <instruction name="VPERMT2Q" form="ymm {z}, ymm, ymm" xed="VPERMT2Q_YMMu64_MASKmskw_YMMu64_YMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_permutex2var_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="__m256i" varname="idx" etype="UI64"/>
+ <parameter type="__m256i" varname="b" etype="UI64"/>
+ <description>Shuffle 64-bit integers in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ off := idx[i+1:i]*64
+ dst[i+63:i] := idx[i+2] ? b[off+63:off] : a[off+63:off]
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPERMI2Q" form="ymm, ymm, ymm" xed="VPERMI2Q_YMMu64_MASKmskw_YMMu64_YMMu64_AVX512"/>
+ <instruction name="VPERMT2Q" form="ymm, ymm, ymm" xed="VPERMT2Q_YMMu64_MASKmskw_YMMu64_YMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask2_permutex2var_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <parameter type="__m128i" varname="idx" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="b" etype="UI64"/>
+ <description>Shuffle 64-bit integers in "a" and "b" using the corresponding selector and index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "idx" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ off := idx[i]*64
+ IF k[j]
+ dst[i+63:i] := idx[i+1] ? b[off+63:off] : a[off+63:off]
+ ELSE
+ dst[i+63:i] := idx[i+63:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPERMI2Q" form="xmm {k}, xmm, xmm" xed="VPERMI2Q_XMMu64_MASKmskw_XMMu64_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_permutex2var_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="idx" etype="UI64"/>
+ <parameter type="__m128i" varname="b" etype="UI64"/>
+ <description>Shuffle 64-bit integers in "a" and "b" using the corresponding selector and index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ off := idx[i]*64
+ IF k[j]
+ dst[i+63:i] := idx[i+1] ? b[off+63:off] : a[off+63:off]
+ ELSE
+ dst[i+63:i] := a[i+63:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPERMT2Q" form="xmm {k}, xmm, xmm" xed="VPERMT2Q_XMMu64_MASKmskw_XMMu64_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_permutex2var_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <parameter type="__m128i" varname="idx" etype="UI64"/>
+ <parameter type="__m128i" varname="b" etype="UI64"/>
+ <description>Shuffle 64-bit integers in "a" and "b" using the corresponding selector and index in "idx", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ off := idx[i]*64
+ IF k[j]
+ dst[i+63:i] := (idx[i+1]) ? b[off+63:off] : a[off+63:off]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPERMI2Q" form="xmm {z}, xmm, xmm" xed="VPERMI2Q_XMMu64_MASKmskw_XMMu64_XMMu64_AVX512"/>
+ <instruction name="VPERMT2Q" form="xmm {z}, xmm, xmm" xed="VPERMT2Q_XMMu64_MASKmskw_XMMu64_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_permutex2var_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <parameter type="__m128i" varname="idx" etype="UI64"/>
+ <parameter type="__m128i" varname="b" etype="UI64"/>
+ <description>Shuffle 64-bit integers in "a" and "b" using the corresponding selector and index in "idx", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ off := idx[i]*64
+ dst[i+63:i] := idx[i+1] ? b[off+63:off] : a[off+63:off]
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPERMI2Q" form="xmm, xmm, xmm" xed="VPERMI2Q_XMMu64_MASKmskw_XMMu64_XMMu64_AVX512"/>
+ <instruction name="VPERMT2Q" form="xmm, xmm, xmm" xed="VPERMT2Q_XMMu64_MASKmskw_XMMu64_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_permute_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="4"/>
+ <description>Shuffle double-precision (64-bit) floating-point elements in "a" within 128-bit lanes using the control in "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+IF (imm8[0] == 0) tmp_dst[63:0] := a[63:0]; FI
+IF (imm8[0] == 1) tmp_dst[63:0] := a[127:64]; FI
+IF (imm8[1] == 0) tmp_dst[127:64] := a[63:0]; FI
+IF (imm8[1] == 1) tmp_dst[127:64] := a[127:64]; FI
+IF (imm8[2] == 0) tmp_dst[191:128] := a[191:128]; FI
+IF (imm8[2] == 1) tmp_dst[191:128] := a[255:192]; FI
+IF (imm8[3] == 0) tmp_dst[255:192] := a[191:128]; FI
+IF (imm8[3] == 1) tmp_dst[255:192] := a[255:192]; FI
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := tmp_dst[i+63:i]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPERMILPD" form="ymm {k}, ymm, imm8" xed="VPERMILPD_YMMf64_MASKmskw_YMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_permutevar_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <parameter type="__m256i" varname="b" etype="UI64"/>
+ <description>Shuffle double-precision (64-bit) floating-point elements in "a" within 128-bit lanes using the control in "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+IF (b[1] == 0) tmp_dst[63:0] := a[63:0]; FI
+IF (b[1] == 1) tmp_dst[63:0] := a[127:64]; FI
+IF (b[65] == 0) tmp_dst[127:64] := a[63:0]; FI
+IF (b[65] == 1) tmp_dst[127:64] := a[127:64]; FI
+IF (b[129] == 0) tmp_dst[191:128] := a[191:128]; FI
+IF (b[129] == 1) tmp_dst[191:128] := a[255:192]; FI
+IF (b[193] == 0) tmp_dst[255:192] := a[191:128]; FI
+IF (b[193] == 1) tmp_dst[255:192] := a[255:192]; FI
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := tmp_dst[i+63:i]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPERMILPD" form="ymm {k}, ymm, ymm" xed="VPERMILPD_YMMf64_MASKmskw_YMMf64_YMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_permute_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="4"/>
+ <description>Shuffle double-precision (64-bit) floating-point elements in "a" within 128-bit lanes using the control in "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+IF (imm8[0] == 0) tmp_dst[63:0] := a[63:0]; FI
+IF (imm8[0] == 1) tmp_dst[63:0] := a[127:64]; FI
+IF (imm8[1] == 0) tmp_dst[127:64] := a[63:0]; FI
+IF (imm8[1] == 1) tmp_dst[127:64] := a[127:64]; FI
+IF (imm8[2] == 0) tmp_dst[191:128] := a[191:128]; FI
+IF (imm8[2] == 1) tmp_dst[191:128] := a[255:192]; FI
+IF (imm8[3] == 0) tmp_dst[255:192] := a[191:128]; FI
+IF (imm8[3] == 1) tmp_dst[255:192] := a[255:192]; FI
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := tmp_dst[i+63:i]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPERMILPD" form="ymm {z}, ymm, imm8" xed="VPERMILPD_YMMf64_MASKmskw_YMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_permutevar_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <parameter type="__m256i" varname="b" etype="UI64"/>
+ <description>Shuffle double-precision (64-bit) floating-point elements in "a" within 128-bit lanes using the control in "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+IF (b[1] == 0) tmp_dst[63:0] := a[63:0]; FI
+IF (b[1] == 1) tmp_dst[63:0] := a[127:64]; FI
+IF (b[65] == 0) tmp_dst[127:64] := a[63:0]; FI
+IF (b[65] == 1) tmp_dst[127:64] := a[127:64]; FI
+IF (b[129] == 0) tmp_dst[191:128] := a[191:128]; FI
+IF (b[129] == 1) tmp_dst[191:128] := a[255:192]; FI
+IF (b[193] == 0) tmp_dst[255:192] := a[191:128]; FI
+IF (b[193] == 1) tmp_dst[255:192] := a[255:192]; FI
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := tmp_dst[i+63:i]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPERMILPD" form="ymm {z}, ymm, ymm" xed="VPERMILPD_YMMf64_MASKmskw_YMMf64_YMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_permute_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="2"/>
+ <description>Shuffle double-precision (64-bit) floating-point elements in "a" using the control in "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+IF (imm8[0] == 0) tmp_dst[63:0] := a[63:0]; FI
+IF (imm8[0] == 1) tmp_dst[63:0] := a[127:64]; FI
+IF (imm8[1] == 0) tmp_dst[127:64] := a[63:0]; FI
+IF (imm8[1] == 1) tmp_dst[127:64] := a[127:64]; FI
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := tmp_dst[i+63:i]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPERMILPD" form="xmm {k}, xmm, imm8" xed="VPERMILPD_XMMf64_MASKmskw_XMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_permutevar_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128i" varname="b" etype="UI64"/>
+ <description>Shuffle double-precision (64-bit) floating-point elements in "a" using the control in "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+IF (b[1] == 0) tmp_dst[63:0] := a[63:0]; FI
+IF (b[1] == 1) tmp_dst[63:0] := a[127:64]; FI
+IF (b[65] == 0) tmp_dst[127:64] := a[63:0]; FI
+IF (b[65] == 1) tmp_dst[127:64] := a[127:64]; FI
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := tmp_dst[i+63:i]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPERMILPD" form="xmm {k}, xmm, xmm" xed="VPERMILPD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_permute_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="2"/>
+ <description>Shuffle double-precision (64-bit) floating-point elements in "a" using the control in "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+IF (imm8[0] == 0) tmp_dst[63:0] := a[63:0]; FI
+IF (imm8[0] == 1) tmp_dst[63:0] := a[127:64]; FI
+IF (imm8[1] == 0) tmp_dst[127:64] := a[63:0]; FI
+IF (imm8[1] == 1) tmp_dst[127:64] := a[127:64]; FI
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := tmp_dst[i+63:i]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPERMILPD" form="xmm {z}, xmm, imm8" xed="VPERMILPD_XMMf64_MASKmskw_XMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_permutevar_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128i" varname="b" etype="UI64"/>
+ <description>Shuffle double-precision (64-bit) floating-point elements in "a" using the control in "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+IF (b[1] == 0) tmp_dst[63:0] := a[63:0]; FI
+IF (b[1] == 1) tmp_dst[63:0] := a[127:64]; FI
+IF (b[65] == 0) tmp_dst[127:64] := a[63:0]; FI
+IF (b[65] == 1) tmp_dst[127:64] := a[127:64]; FI
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := tmp_dst[i+63:i]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPERMILPD" form="xmm {z}, xmm, xmm" xed="VPERMILPD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_permute_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shuffle single-precision (32-bit) floating-point elements in "a" within 128-bit lanes using the control in "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE SELECT4(src, control) {
+ CASE(control[1:0]) OF
+ 0: tmp[31:0] := src[31:0]
+ 1: tmp[31:0] := src[63:32]
+ 2: tmp[31:0] := src[95:64]
+ 3: tmp[31:0] := src[127:96]
+ ESAC
+ RETURN tmp[31:0]
+}
+tmp_dst[31:0] := SELECT4(a[127:0], imm8[1:0])
+tmp_dst[63:32] := SELECT4(a[127:0], imm8[3:2])
+tmp_dst[95:64] := SELECT4(a[127:0], imm8[5:4])
+tmp_dst[127:96] := SELECT4(a[127:0], imm8[7:6])
+tmp_dst[159:128] := SELECT4(a[255:128], imm8[1:0])
+tmp_dst[191:160] := SELECT4(a[255:128], imm8[3:2])
+tmp_dst[223:192] := SELECT4(a[255:128], imm8[5:4])
+tmp_dst[255:224] := SELECT4(a[255:128], imm8[7:6])
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := tmp_dst[i+31:i]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPERMILPS" form="ymm {k}, ymm, imm8" xed="VPERMILPS_YMMf32_MASKmskw_YMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_permutevar_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="__m256i" varname="b" etype="UI32"/>
+ <description>Shuffle single-precision (32-bit) floating-point elements in "a" within 128-bit lanes using the control in "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE SELECT4(src, control) {
+ CASE(control[1:0]) OF
+ 0: tmp[31:0] := src[31:0]
+ 1: tmp[31:0] := src[63:32]
+ 2: tmp[31:0] := src[95:64]
+ 3: tmp[31:0] := src[127:96]
+ ESAC
+ RETURN tmp[31:0]
+}
+tmp_dst[31:0] := SELECT4(a[127:0], b[1:0])
+tmp_dst[63:32] := SELECT4(a[127:0], b[33:32])
+tmp_dst[95:64] := SELECT4(a[127:0], b[65:64])
+tmp_dst[127:96] := SELECT4(a[127:0], b[97:96])
+tmp_dst[159:128] := SELECT4(a[255:128], b[129:128])
+tmp_dst[191:160] := SELECT4(a[255:128], b[161:160])
+tmp_dst[223:192] := SELECT4(a[255:128], b[193:192])
+tmp_dst[255:224] := SELECT4(a[255:128], b[225:224])
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := tmp_dst[i+31:i]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPERMILPS" form="ymm {k}, ymm, ymm" xed="VPERMILPS_YMMf32_MASKmskw_YMMf32_YMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_permute_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shuffle single-precision (32-bit) floating-point elements in "a" within 128-bit lanes using the control in "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE SELECT4(src, control) {
+ CASE(control[1:0]) OF
+ 0: tmp[31:0] := src[31:0]
+ 1: tmp[31:0] := src[63:32]
+ 2: tmp[31:0] := src[95:64]
+ 3: tmp[31:0] := src[127:96]
+ ESAC
+ RETURN tmp[31:0]
+}
+tmp_dst[31:0] := SELECT4(a[127:0], imm8[1:0])
+tmp_dst[63:32] := SELECT4(a[127:0], imm8[3:2])
+tmp_dst[95:64] := SELECT4(a[127:0], imm8[5:4])
+tmp_dst[127:96] := SELECT4(a[127:0], imm8[7:6])
+tmp_dst[159:128] := SELECT4(a[255:128], imm8[1:0])
+tmp_dst[191:160] := SELECT4(a[255:128], imm8[3:2])
+tmp_dst[223:192] := SELECT4(a[255:128], imm8[5:4])
+tmp_dst[255:224] := SELECT4(a[255:128], imm8[7:6])
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := tmp_dst[i+31:i]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPERMILPS" form="ymm {z}, ymm, imm8" xed="VPERMILPS_YMMf32_MASKmskw_YMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_permutevar_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="__m256i" varname="b" etype="UI32"/>
+ <description>Shuffle single-precision (32-bit) floating-point elements in "a" within 128-bit lanes using the control in "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE SELECT4(src, control) {
+ CASE(control[1:0]) OF
+ 0: tmp[31:0] := src[31:0]
+ 1: tmp[31:0] := src[63:32]
+ 2: tmp[31:0] := src[95:64]
+ 3: tmp[31:0] := src[127:96]
+ ESAC
+ RETURN tmp[31:0]
+}
+tmp_dst[31:0] := SELECT4(a[127:0], b[1:0])
+tmp_dst[63:32] := SELECT4(a[127:0], b[33:32])
+tmp_dst[95:64] := SELECT4(a[127:0], b[65:64])
+tmp_dst[127:96] := SELECT4(a[127:0], b[97:96])
+tmp_dst[159:128] := SELECT4(a[255:128], b[129:128])
+tmp_dst[191:160] := SELECT4(a[255:128], b[161:160])
+tmp_dst[223:192] := SELECT4(a[255:128], b[193:192])
+tmp_dst[255:224] := SELECT4(a[255:128], b[225:224])
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := tmp_dst[i+31:i]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPERMILPS" form="ymm {z}, ymm, ymm" xed="VPERMILPS_YMMf32_MASKmskw_YMMf32_YMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_permute_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shuffle single-precision (32-bit) floating-point elements in "a" using the control in "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE SELECT4(src, control) {
+ CASE(control[1:0]) OF
+ 0: tmp[31:0] := src[31:0]
+ 1: tmp[31:0] := src[63:32]
+ 2: tmp[31:0] := src[95:64]
+ 3: tmp[31:0] := src[127:96]
+ ESAC
+ RETURN tmp[31:0]
+}
+tmp_dst[31:0] := SELECT4(a[127:0], imm8[1:0])
+tmp_dst[63:32] := SELECT4(a[127:0], imm8[3:2])
+tmp_dst[95:64] := SELECT4(a[127:0], imm8[5:4])
+tmp_dst[127:96] := SELECT4(a[127:0], imm8[7:6])
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := tmp_dst[i+31:i]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPERMILPS" form="xmm {k}, xmm, imm8" xed="VPERMILPS_XMMf32_MASKmskw_XMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_permutevar_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128i" varname="b" etype="UI32"/>
+ <description>Shuffle single-precision (32-bit) floating-point elements in "a" using the control in "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE SELECT4(src, control) {
+ CASE(control[1:0]) OF
+ 0: tmp[31:0] := src[31:0]
+ 1: tmp[31:0] := src[63:32]
+ 2: tmp[31:0] := src[95:64]
+ 3: tmp[31:0] := src[127:96]
+ ESAC
+ RETURN tmp[31:0]
+}
+tmp_dst[31:0] := SELECT4(a[127:0], b[1:0])
+tmp_dst[63:32] := SELECT4(a[127:0], b[33:32])
+tmp_dst[95:64] := SELECT4(a[127:0], b[65:64])
+tmp_dst[127:96] := SELECT4(a[127:0], b[97:96])
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := tmp_dst[i+31:i]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPERMILPS" form="xmm {k}, xmm, xmm" xed="VPERMILPS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_permute_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shuffle single-precision (32-bit) floating-point elements in "a" using the control in "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE SELECT4(src, control) {
+ CASE(control[1:0]) OF
+ 0: tmp[31:0] := src[31:0]
+ 1: tmp[31:0] := src[63:32]
+ 2: tmp[31:0] := src[95:64]
+ 3: tmp[31:0] := src[127:96]
+ ESAC
+ RETURN tmp[31:0]
+}
+tmp_dst[31:0] := SELECT4(a[127:0], imm8[1:0])
+tmp_dst[63:32] := SELECT4(a[127:0], imm8[3:2])
+tmp_dst[95:64] := SELECT4(a[127:0], imm8[5:4])
+tmp_dst[127:96] := SELECT4(a[127:0], imm8[7:6])
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := tmp_dst[i+31:i]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPERMILPS" form="xmm {z}, xmm, imm8" xed="VPERMILPS_XMMf32_MASKmskw_XMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_permutevar_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128i" varname="b" etype="UI32"/>
+ <description>Shuffle single-precision (32-bit) floating-point elements in "a" using the control in "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE SELECT4(src, control) {
+ CASE(control[1:0]) OF
+ 0: tmp[31:0] := src[31:0]
+ 1: tmp[31:0] := src[63:32]
+ 2: tmp[31:0] := src[95:64]
+ 3: tmp[31:0] := src[127:96]
+ ESAC
+ RETURN tmp[31:0]
+}
+tmp_dst[31:0] := SELECT4(a[127:0], b[1:0])
+tmp_dst[63:32] := SELECT4(a[127:0], b[33:32])
+tmp_dst[95:64] := SELECT4(a[127:0], b[65:64])
+tmp_dst[127:96] := SELECT4(a[127:0], b[97:96])
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := tmp_dst[i+31:i]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPERMILPS" form="xmm {z}, xmm, xmm" xed="VPERMILPS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_permutex_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shuffle double-precision (64-bit) floating-point elements in "a" across lanes using the control in "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE SELECT4(src, control) {
+ CASE(control[1:0]) OF
+ 0: tmp[63:0] := src[63:0]
+ 1: tmp[63:0] := src[127:64]
+ 2: tmp[63:0] := src[191:128]
+ 3: tmp[63:0] := src[255:192]
+ ESAC
+ RETURN tmp[63:0]
+}
+tmp_dst[63:0] := SELECT4(a[255:0], imm8[1:0])
+tmp_dst[127:64] := SELECT4(a[255:0], imm8[3:2])
+tmp_dst[191:128] := SELECT4(a[255:0], imm8[5:4])
+tmp_dst[255:192] := SELECT4(a[255:0], imm8[7:6])
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := tmp_dst[i+63:i]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPERMPD" form="ymm {k}, ymm, imm8" xed="VPERMPD_YMMf64_MASKmskw_YMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_permutexvar_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="idx" etype="UI64"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <description>Shuffle double-precision (64-bit) floating-point elements in "a" across lanes using the corresponding index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ id := idx[i+1:i]*64
+ IF k[j]
+ dst[i+63:i] := a[id+63:id]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPERMPD" form="ymm {k}, ymm, ymm" xed="VPERMPD_YMMf64_MASKmskw_YMMf64_YMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_permutex_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shuffle double-precision (64-bit) floating-point elements in "a" across lanes using the control in "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE SELECT4(src, control) {
+ CASE(control[1:0]) OF
+ 0: tmp[63:0] := src[63:0]
+ 1: tmp[63:0] := src[127:64]
+ 2: tmp[63:0] := src[191:128]
+ 3: tmp[63:0] := src[255:192]
+ ESAC
+ RETURN tmp[63:0]
+}
+tmp_dst[63:0] := SELECT4(a[255:0], imm8[1:0])
+tmp_dst[127:64] := SELECT4(a[255:0], imm8[3:2])
+tmp_dst[191:128] := SELECT4(a[255:0], imm8[5:4])
+tmp_dst[255:192] := SELECT4(a[255:0], imm8[7:6])
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := tmp_dst[i+63:i]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPERMPD" form="ymm {z}, ymm, imm8" xed="VPERMPD_YMMf64_MASKmskw_YMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_permutexvar_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="idx" etype="UI64"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <description>Shuffle double-precision (64-bit) floating-point elements in "a" across lanes using the corresponding index in "idx", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ id := idx[i+1:i]*64
+ IF k[j]
+ dst[i+63:i] := a[id+63:id]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPERMPD" form="ymm {z}, ymm, ymm" xed="VPERMPD_YMMf64_MASKmskw_YMMf64_YMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_permutex_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shuffle double-precision (64-bit) floating-point elements in "a" across lanes using the control in "imm8", and store the results in "dst".</description>
+ <operation>
+DEFINE SELECT4(src, control) {
+ CASE(control[1:0]) OF
+ 0: tmp[63:0] := src[63:0]
+ 1: tmp[63:0] := src[127:64]
+ 2: tmp[63:0] := src[191:128]
+ 3: tmp[63:0] := src[255:192]
+ ESAC
+ RETURN tmp[63:0]
+}
+dst[63:0] := SELECT4(a[255:0], imm8[1:0])
+dst[127:64] := SELECT4(a[255:0], imm8[3:2])
+dst[191:128] := SELECT4(a[255:0], imm8[5:4])
+dst[255:192] := SELECT4(a[255:0], imm8[7:6])
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPERMPD" form="ymm, ymm, imm8" xed="VPERMPD_YMMf64_MASKmskw_YMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_permutexvar_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256i" varname="idx" etype="UI64"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <description>Shuffle double-precision (64-bit) floating-point elements in "a" across lanes using the corresponding index in "idx", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ id := idx[i+1:i]*64
+ dst[i+63:i] := a[id+63:id]
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPERMPD" form="ymm, ymm, ymm" xed="VPERMPD_YMMf64_MASKmskw_YMMf64_YMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_permutexvar_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="idx" etype="UI32"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <description>Shuffle single-precision (32-bit) floating-point elements in "a" across lanes using the corresponding index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ id := idx[i+2:i]*32
+ IF k[j]
+ dst[i+31:i] := a[id+31:id]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPERMPS" form="ymm {k}, ymm, ymm" xed="VPERMPS_YMMf32_MASKmskw_YMMf32_YMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_permutexvar_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="idx" etype="UI32"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <description>Shuffle single-precision (32-bit) floating-point elements in "a" across lanes using the corresponding index in "idx", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ id := idx[i+2:i]*32
+ IF k[j]
+ dst[i+31:i] := a[id+31:id]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPERMPS" form="ymm {z}, ymm, ymm" xed="VPERMPS_YMMf32_MASKmskw_YMMf32_YMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_permutexvar_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256i" varname="idx" etype="UI32"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+  <description>Shuffle single-precision (32-bit) floating-point elements in "a" across lanes using the corresponding index in "idx", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ id := idx[i+2:i]*32
+ dst[i+31:i] := a[id+31:id]
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPERMPS" form="ymm, ymm, ymm" xed="VPERMPS_YMMf32_MASKmskw_YMMf32_YMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_permutex_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m256i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="8"/>
+  <description>Shuffle 64-bit integers in "a" across lanes using the control in "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE SELECT4(src, control) {
+ CASE(control[1:0]) OF
+ 0: tmp[63:0] := src[63:0]
+ 1: tmp[63:0] := src[127:64]
+ 2: tmp[63:0] := src[191:128]
+ 3: tmp[63:0] := src[255:192]
+ ESAC
+ RETURN tmp[63:0]
+}
+tmp_dst[63:0] := SELECT4(a[255:0], imm8[1:0])
+tmp_dst[127:64] := SELECT4(a[255:0], imm8[3:2])
+tmp_dst[191:128] := SELECT4(a[255:0], imm8[5:4])
+tmp_dst[255:192] := SELECT4(a[255:0], imm8[7:6])
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := tmp_dst[i+63:i]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPERMQ" form="ymm {k}, ymm, imm8" xed="VPERMQ_YMMu64_MASKmskw_YMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_permutexvar_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m256i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="idx" etype="UI64"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <description>Shuffle 64-bit integers in "a" across lanes using the corresponding index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ id := idx[i+1:i]*64
+ IF k[j]
+ dst[i+63:i] := a[id+63:id]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPERMQ" form="ymm {k}, ymm, ymm" xed="VPERMQ_YMMu64_MASKmskw_YMMu64_YMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_permutex_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shuffle 64-bit integers in "a" across lanes using the control in "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE SELECT4(src, control) {
+ CASE(control[1:0]) OF
+ 0: tmp[63:0] := src[63:0]
+ 1: tmp[63:0] := src[127:64]
+ 2: tmp[63:0] := src[191:128]
+ 3: tmp[63:0] := src[255:192]
+ ESAC
+ RETURN tmp[63:0]
+}
+tmp_dst[63:0] := SELECT4(a[255:0], imm8[1:0])
+tmp_dst[127:64] := SELECT4(a[255:0], imm8[3:2])
+tmp_dst[191:128] := SELECT4(a[255:0], imm8[5:4])
+tmp_dst[255:192] := SELECT4(a[255:0], imm8[7:6])
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := tmp_dst[i+63:i]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPERMQ" form="ymm {z}, ymm, imm8" xed="VPERMQ_YMMu64_MASKmskw_YMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_permutexvar_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="idx" etype="UI64"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <description>Shuffle 64-bit integers in "a" across lanes using the corresponding index in "idx", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ id := idx[i+1:i]*64
+ IF k[j]
+ dst[i+63:i] := a[id+63:id]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPERMQ" form="ymm {z}, ymm, ymm" xed="VPERMQ_YMMu64_MASKmskw_YMMu64_YMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_permutex_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shuffle 64-bit integers in "a" across lanes using the control in "imm8", and store the results in "dst".</description>
+ <operation>
+DEFINE SELECT4(src, control) {
+ CASE(control[1:0]) OF
+ 0: tmp[63:0] := src[63:0]
+ 1: tmp[63:0] := src[127:64]
+ 2: tmp[63:0] := src[191:128]
+ 3: tmp[63:0] := src[255:192]
+ ESAC
+ RETURN tmp[63:0]
+}
+dst[63:0] := SELECT4(a[255:0], imm8[1:0])
+dst[127:64] := SELECT4(a[255:0], imm8[3:2])
+dst[191:128] := SELECT4(a[255:0], imm8[5:4])
+dst[255:192] := SELECT4(a[255:0], imm8[7:6])
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPERMQ" form="ymm, ymm, imm8" xed="VPERMQ_YMMu64_MASKmskw_YMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_permutexvar_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m256i" varname="idx" etype="UI64"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <description>Shuffle 64-bit integers in "a" across lanes using the corresponding index in "idx", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ id := idx[i+1:i]*64
+ dst[i+63:i] := a[id+63:id]
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPERMQ" form="ymm, ymm, ymm" xed="VPERMQ_YMMu64_MASKmskw_YMMu64_YMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_expand_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <description>Load contiguous active 32-bit integers from "a" (those with their respective bit set in mask "k"), and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+m := 0
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := a[m+31:m]
+ m := m + 32
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPEXPANDD" form="ymm {k}, ymm" xed="VPEXPANDD_YMMu32_MASKmskw_YMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_expandloadu_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Load</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="void const*" varname="mem_addr" etype="UI32" memwidth="256"/>
+ <description>Load contiguous active 32-bit integers from unaligned memory at "mem_addr" (those with their respective bit set in mask "k"), and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+m := 0
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := MEM[mem_addr+m+31:mem_addr+m]
+ m := m + 32
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPEXPANDD" form="ymm {k}, m64" xed="VPEXPANDD_YMMu32_MASKmskw_MEMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_expand_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <description>Load contiguous active 32-bit integers from "a" (those with their respective bit set in mask "k"), and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+m := 0
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := a[m+31:m]
+ m := m + 32
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPEXPANDD" form="ymm {z}, ymm" xed="VPEXPANDD_YMMu32_MASKmskw_YMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_expandloadu_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Load</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="void const*" varname="mem_addr" etype="UI32" memwidth="256"/>
+ <description>Load contiguous active 32-bit integers from unaligned memory at "mem_addr" (those with their respective bit set in mask "k"), and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+m := 0
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := MEM[mem_addr+m+31:mem_addr+m]
+ m := m + 32
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPEXPANDD" form="ymm {z}, m64" xed="VPEXPANDD_YMMu32_MASKmskw_MEMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_expand_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <description>Load contiguous active 32-bit integers from "a" (those with their respective bit set in mask "k"), and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+m := 0
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := a[m+31:m]
+ m := m + 32
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPEXPANDD" form="xmm {k}, xmm" xed="VPEXPANDD_XMMu32_MASKmskw_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_expandloadu_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Load</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="void const*" varname="mem_addr" etype="UI32" memwidth="128"/>
+ <description>Load contiguous active 32-bit integers from unaligned memory at "mem_addr" (those with their respective bit set in mask "k"), and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+m := 0
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := MEM[mem_addr+m+31:mem_addr+m]
+ m := m + 32
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPEXPANDD" form="xmm {k}, m64" xed="VPEXPANDD_XMMu32_MASKmskw_MEMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_expand_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <description>Load contiguous active 32-bit integers from "a" (those with their respective bit set in mask "k"), and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+m := 0
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := a[m+31:m]
+ m := m + 32
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPEXPANDD" form="xmm {z}, xmm" xed="VPEXPANDD_XMMu32_MASKmskw_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_expandloadu_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Load</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="void const*" varname="mem_addr" etype="UI32" memwidth="128"/>
+ <description>Load contiguous active 32-bit integers from unaligned memory at "mem_addr" (those with their respective bit set in mask "k"), and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+m := 0
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := MEM[mem_addr+m+31:mem_addr+m]
+ m := m + 32
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPEXPANDD" form="xmm {z}, m64" xed="VPEXPANDD_XMMu32_MASKmskw_MEMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_expand_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m256i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <description>Load contiguous active 64-bit integers from "a" (those with their respective bit set in mask "k"), and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+m := 0
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := a[m+63:m]
+ m := m + 64
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPEXPANDQ" form="ymm {k}, ymm" xed="VPEXPANDQ_YMMu64_MASKmskw_YMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_expandloadu_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Load</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m256i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="void const*" varname="mem_addr" etype="UI64" memwidth="256"/>
+ <description>Load contiguous active 64-bit integers from unaligned memory at "mem_addr" (those with their respective bit set in mask "k"), and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+m := 0
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := MEM[mem_addr+m+63:mem_addr+m]
+ m := m + 64
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPEXPANDQ" form="ymm {k}, m64" xed="VPEXPANDQ_YMMu64_MASKmskw_MEMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_expand_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <description>Load contiguous active 64-bit integers from "a" (those with their respective bit set in mask "k"), and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+m := 0
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := a[m+63:m]
+ m := m + 64
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPEXPANDQ" form="ymm {z}, ymm" xed="VPEXPANDQ_YMMu64_MASKmskw_YMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_expandloadu_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Load</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="void const*" varname="mem_addr" etype="UI64" memwidth="256"/>
+ <description>Load contiguous active 64-bit integers from unaligned memory at "mem_addr" (those with their respective bit set in mask "k"), and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+m := 0
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := MEM[mem_addr+m+63:mem_addr+m]
+ m := m + 64
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPEXPANDQ" form="ymm {z}, m64" xed="VPEXPANDQ_YMMu64_MASKmskw_MEMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_expand_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m128i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <description>Load contiguous active 64-bit integers from "a" (those with their respective bit set in mask "k"), and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+m := 0
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := a[m+63:m]
+ m := m + 64
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPEXPANDQ" form="xmm {k}, xmm" xed="VPEXPANDQ_XMMu64_MASKmskw_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_expandloadu_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Load</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m128i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="void const*" varname="mem_addr" etype="UI64" memwidth="128"/>
+ <description>Load contiguous active 64-bit integers from unaligned memory at "mem_addr" (those with their respective bit set in mask "k"), and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+m := 0
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := MEM[mem_addr+m+63:mem_addr+m]
+ m := m + 64
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPEXPANDQ" form="xmm {k}, m64" xed="VPEXPANDQ_XMMu64_MASKmskw_MEMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_expand_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <description>Load contiguous active 64-bit integers from "a" (those with their respective bit set in mask "k"), and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+m := 0
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := a[m+63:m]
+ m := m + 64
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPEXPANDQ" form="xmm {z}, xmm" xed="VPEXPANDQ_XMMu64_MASKmskw_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_expandloadu_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Load</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="void const*" varname="mem_addr" etype="UI64" memwidth="128"/>
+ <description>Load contiguous active 64-bit integers from unaligned memory at "mem_addr" (those with their respective bit set in mask "k"), and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+m := 0
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := MEM[mem_addr+m+63:mem_addr+m]
+ m := m + 64
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPEXPANDQ" form="xmm {z}, m64" xed="VPEXPANDQ_XMMu64_MASKmskw_MEMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mmask_i32gather_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Load</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="vindex" etype="SI32"/>
+ <parameter type="void const*" varname="base_addr" etype="UI32"/>
+ <parameter type="const int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <description>Gather 32-bit integers from memory using 32-bit indices. 32-bit elements are loaded from addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ m := j*32
+ IF k[j]
+ addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
+ dst[i+31:i] := MEM[addr+31:addr]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPGATHERDD" form="ymm {k}, vm32y" xed="VPGATHERDD_YMMu32_MASKmskw_MEMu32_AVX512_VL256"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mmask_i32gather_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Load</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="vindex" etype="SI32"/>
+ <parameter type="void const*" varname="base_addr" etype="UI32"/>
+ <parameter type="const int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <description>Gather 32-bit integers from memory using 32-bit indices. 32-bit elements are loaded from addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8.</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ m := j*32
+ IF k[j]
+ addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
+ dst[i+31:i] := MEM[addr+31:addr]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPGATHERDD" form="xmm {k}, vm32x" xed="VPGATHERDD_XMMu32_MASKmskw_MEMu32_AVX512_VL128"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mmask_i32gather_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Load</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m256i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="vindex" etype="SI32"/>
 <parameter type="void const*" varname="base_addr" etype="UI64"/>
+ <parameter type="const int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <description>Gather 64-bit integers from memory using 32-bit indices. 64-bit elements are loaded from addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8.</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ m := j*32
+ IF k[j]
+ addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
+ dst[i+63:i] := MEM[addr+63:addr]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPGATHERDQ" form="ymm {k}, vm32x" xed="VPGATHERDQ_YMMu64_MASKmskw_MEMu64_AVX512_VL256"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mmask_i32gather_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Load</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m128i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="vindex" etype="SI32"/>
+ <parameter type="void const*" varname="base_addr" etype="UI64"/>
+ <parameter type="const int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <description>Gather 64-bit integers from memory using 32-bit indices. 64-bit elements are loaded from addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8.</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ m := j*32
+ IF k[j]
+ addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
+ dst[i+63:i] := MEM[addr+63:addr]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPGATHERDQ" form="xmm {k}, vm32x" xed="VPGATHERDQ_XMMu64_MASKmskw_MEMu64_AVX512_VL128"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mmask_i64gather_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Load</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="vindex" etype="SI64"/>
+ <parameter type="void const*" varname="base_addr" etype="UI32"/>
+ <parameter type="const int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <description>Gather 32-bit integers from memory using 64-bit indices. 32-bit elements are loaded from addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8.</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ m := j*64
+ IF k[j]
+ addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8
+ dst[i+31:i] := MEM[addr+31:addr]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPGATHERQD" form="xmm {k}, vm64y" xed="VPGATHERQD_XMMu32_MASKmskw_MEMu32_AVX512_VL128"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mmask_i64gather_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Load</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="vindex" etype="SI64"/>
+ <parameter type="void const*" varname="base_addr" etype="UI32"/>
+ <parameter type="const int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <description>Gather 32-bit integers from memory using 64-bit indices. 32-bit elements are loaded from addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8.</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*32
+ m := j*64
+ IF k[j]
+ addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8
+ dst[i+31:i] := MEM[addr+31:addr]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:64] := 0
+ </operation>
+ <instruction name="VPGATHERQD" form="xmm {k}, vm64x" xed="VPGATHERQD_XMMu32_MASKmskw_MEMu32_AVX512_VL128"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mmask_i64gather_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Load</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m256i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="vindex" etype="SI64"/>
+ <parameter type="void const*" varname="base_addr" etype="UI64"/>
+ <parameter type="const int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <description>Gather 64-bit integers from memory using 64-bit indices. 64-bit elements are loaded from addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8.</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ m := j*64
+ IF k[j]
+ addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8
+ dst[i+63:i] := MEM[addr+63:addr]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPGATHERQQ" form="ymm {k}, vm64y" xed="VPGATHERQQ_YMMu64_MASKmskw_MEMu64_AVX512_VL256"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mmask_i64gather_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Load</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m128i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="vindex" etype="SI64"/>
+ <parameter type="void const*" varname="base_addr" etype="UI64"/>
+ <parameter type="const int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <description>Gather 64-bit integers from memory using 64-bit indices. 64-bit elements are loaded from addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8.</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ m := j*64
+ IF k[j]
+ addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8
+ dst[i+63:i] := MEM[addr+63:addr]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPGATHERQQ" form="xmm {k}, vm64x" xed="VPGATHERQQ_XMMu64_MASKmskw_MEMu64_AVX512_VL128"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_max_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI32"/>
+ <parameter type="__m256i" varname="b" etype="SI32"/>
+ <description>Compare packed signed 32-bit integers in "a" and "b", and store packed maximum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := MAX(a[i+31:i], b[i+31:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMAXSD" form="ymm {k}, ymm, ymm" xed="VPMAXSD_YMMi32_MASKmskw_YMMi32_YMMi32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_max_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI32"/>
+ <parameter type="__m256i" varname="b" etype="SI32"/>
+ <description>Compare packed signed 32-bit integers in "a" and "b", and store packed maximum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := MAX(a[i+31:i], b[i+31:i])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMAXSD" form="ymm {z}, ymm, ymm" xed="VPMAXSD_YMMi32_MASKmskw_YMMi32_YMMi32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_max_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI32"/>
+ <parameter type="__m128i" varname="b" etype="SI32"/>
+ <description>Compare packed signed 32-bit integers in "a" and "b", and store packed maximum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := MAX(a[i+31:i], b[i+31:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMAXSD" form="xmm {k}, xmm, xmm" xed="VPMAXSD_XMMi32_MASKmskw_XMMi32_XMMi32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_max_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI32"/>
+ <parameter type="__m128i" varname="b" etype="SI32"/>
+ <description>Compare packed signed 32-bit integers in "a" and "b", and store packed maximum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := MAX(a[i+31:i], b[i+31:i])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMAXSD" form="xmm {z}, xmm, xmm" xed="VPMAXSD_XMMi32_MASKmskw_XMMi32_XMMi32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_max_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m256i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI64"/>
+ <parameter type="__m256i" varname="b" etype="SI64"/>
+ <description>Compare packed signed 64-bit integers in "a" and "b", and store packed maximum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := MAX(a[i+63:i], b[i+63:i])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMAXSQ" form="ymm {k}, ymm, ymm" xed="VPMAXSQ_YMMi64_MASKmskw_YMMi64_YMMi64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_max_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI64"/>
+ <parameter type="__m256i" varname="b" etype="SI64"/>
+ <description>Compare packed signed 64-bit integers in "a" and "b", and store packed maximum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := MAX(a[i+63:i], b[i+63:i])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMAXSQ" form="ymm {z}, ymm, ymm" xed="VPMAXSQ_YMMi64_MASKmskw_YMMi64_YMMi64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_max_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m256i" varname="a" etype="SI64"/>
+ <parameter type="__m256i" varname="b" etype="SI64"/>
+ <description>Compare packed signed 64-bit integers in "a" and "b", and store packed maximum values in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ dst[i+63:i] := MAX(a[i+63:i], b[i+63:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMAXSQ" form="ymm, ymm, ymm" xed="VPMAXSQ_YMMi64_MASKmskw_YMMi64_YMMi64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_max_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m128i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI64"/>
+ <parameter type="__m128i" varname="b" etype="SI64"/>
+ <description>Compare packed signed 64-bit integers in "a" and "b", and store packed maximum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := MAX(a[i+63:i], b[i+63:i])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMAXSQ" form="xmm {k}, xmm, xmm" xed="VPMAXSQ_XMMi64_MASKmskw_XMMi64_XMMi64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_max_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI64"/>
+ <parameter type="__m128i" varname="b" etype="SI64"/>
+ <description>Compare packed signed 64-bit integers in "a" and "b", and store packed maximum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := MAX(a[i+63:i], b[i+63:i])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMAXSQ" form="xmm {z}, xmm, xmm" xed="VPMAXSQ_XMMi64_MASKmskw_XMMi64_XMMi64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_max_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m128i" varname="a" etype="SI64"/>
+ <parameter type="__m128i" varname="b" etype="SI64"/>
+ <description>Compare packed signed 64-bit integers in "a" and "b", and store packed maximum values in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ dst[i+63:i] := MAX(a[i+63:i], b[i+63:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMAXSQ" form="xmm, xmm, xmm" xed="VPMAXSQ_XMMi64_MASKmskw_XMMi64_XMMi64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_max_epu32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="__m256i" varname="b" etype="UI32"/>
+ <description>Compare packed unsigned 32-bit integers in "a" and "b", and store packed maximum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := MAX(a[i+31:i], b[i+31:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMAXUD" form="ymm {k}, ymm, ymm" xed="VPMAXUD_YMMu32_MASKmskw_YMMu32_YMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_max_epu32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="__m256i" varname="b" etype="UI32"/>
+ <description>Compare packed unsigned 32-bit integers in "a" and "b", and store packed maximum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := MAX(a[i+31:i], b[i+31:i])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMAXUD" form="ymm {z}, ymm, ymm" xed="VPMAXUD_YMMu32_MASKmskw_YMMu32_YMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_max_epu32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="b" etype="UI32"/>
+ <description>Compare packed unsigned 32-bit integers in "a" and "b", and store packed maximum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := MAX(a[i+31:i], b[i+31:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMAXUD" form="xmm {k}, xmm, xmm" xed="VPMAXUD_XMMu32_MASKmskw_XMMu32_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_max_epu32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="b" etype="UI32"/>
+ <description>Compare packed unsigned 32-bit integers in "a" and "b", and store packed maximum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := MAX(a[i+31:i], b[i+31:i])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMAXUD" form="xmm {z}, xmm, xmm" xed="VPMAXUD_XMMu32_MASKmskw_XMMu32_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_max_epu64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m256i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="__m256i" varname="b" etype="UI64"/>
+ <description>Compare packed unsigned 64-bit integers in "a" and "b", and store packed maximum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := MAX(a[i+63:i], b[i+63:i])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMAXUQ" form="ymm {k}, ymm, ymm" xed="VPMAXUQ_YMMu64_MASKmskw_YMMu64_YMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_max_epu64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="__m256i" varname="b" etype="UI64"/>
+ <description>Compare packed unsigned 64-bit integers in "a" and "b", and store packed maximum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := MAX(a[i+63:i], b[i+63:i])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMAXUQ" form="ymm {z}, ymm, ymm" xed="VPMAXUQ_YMMu64_MASKmskw_YMMu64_YMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_max_epu64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="__m256i" varname="b" etype="UI64"/>
+ <description>Compare packed unsigned 64-bit integers in "a" and "b", and store packed maximum values in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ dst[i+63:i] := MAX(a[i+63:i], b[i+63:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMAXUQ" form="ymm, ymm, ymm" xed="VPMAXUQ_YMMu64_MASKmskw_YMMu64_YMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_max_epu64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m128i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <parameter type="__m128i" varname="b" etype="UI64"/>
+ <description>Compare packed unsigned 64-bit integers in "a" and "b", and store packed maximum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := MAX(a[i+63:i], b[i+63:i])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMAXUQ" form="xmm {k}, xmm, xmm" xed="VPMAXUQ_XMMu64_MASKmskw_XMMu64_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_max_epu64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <parameter type="__m128i" varname="b" etype="UI64"/>
+ <description>Compare packed unsigned 64-bit integers in "a" and "b", and store packed maximum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := MAX(a[i+63:i], b[i+63:i])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMAXUQ" form="xmm {z}, xmm, xmm" xed="VPMAXUQ_XMMu64_MASKmskw_XMMu64_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_max_epu64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <parameter type="__m128i" varname="b" etype="UI64"/>
+ <description>Compare packed unsigned 64-bit integers in "a" and "b", and store packed maximum values in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ dst[i+63:i] := MAX(a[i+63:i], b[i+63:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMAXUQ" form="xmm, xmm, xmm" xed="VPMAXUQ_XMMu64_MASKmskw_XMMu64_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_min_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI32"/>
+ <parameter type="__m256i" varname="b" etype="SI32"/>
+ <description>Compare packed signed 32-bit integers in "a" and "b", and store packed minimum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := MIN(a[i+31:i], b[i+31:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMINSD" form="ymm {k}, ymm, ymm" xed="VPMINSD_YMMi32_MASKmskw_YMMi32_YMMi32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_min_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI32"/>
+ <parameter type="__m256i" varname="b" etype="SI32"/>
+ <description>Compare packed signed 32-bit integers in "a" and "b", and store packed minimum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := MIN(a[i+31:i], b[i+31:i])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMINSD" form="ymm {z}, ymm, ymm" xed="VPMINSD_YMMi32_MASKmskw_YMMi32_YMMi32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_min_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI32"/>
+ <parameter type="__m128i" varname="b" etype="SI32"/>
+ <description>Compare packed signed 32-bit integers in "a" and "b", and store packed minimum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := MIN(a[i+31:i], b[i+31:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMINSD" form="xmm {k}, xmm, xmm" xed="VPMINSD_XMMi32_MASKmskw_XMMi32_XMMi32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_min_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI32"/>
+ <parameter type="__m128i" varname="b" etype="SI32"/>
+ <description>Compare packed signed 32-bit integers in "a" and "b", and store packed minimum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := MIN(a[i+31:i], b[i+31:i])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMINSD" form="xmm {z}, xmm, xmm" xed="VPMINSD_XMMi32_MASKmskw_XMMi32_XMMi32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_min_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m256i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI64"/>
+ <parameter type="__m256i" varname="b" etype="SI64"/>
+ <description>Compare packed signed 64-bit integers in "a" and "b", and store packed minimum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := MIN(a[i+63:i], b[i+63:i])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMINSQ" form="ymm {k}, ymm, ymm" xed="VPMINSQ_YMMi64_MASKmskw_YMMi64_YMMi64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_min_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI64"/>
+ <parameter type="__m256i" varname="b" etype="SI64"/>
+ <description>Compare packed signed 64-bit integers in "a" and "b", and store packed minimum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := MIN(a[i+63:i], b[i+63:i])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMINSQ" form="ymm {z}, ymm, ymm" xed="VPMINSQ_YMMi64_MASKmskw_YMMi64_YMMi64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_min_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m256i" varname="a" etype="SI64"/>
+ <parameter type="__m256i" varname="b" etype="SI64"/>
+ <description>Compare packed signed 64-bit integers in "a" and "b", and store packed minimum values in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ dst[i+63:i] := MIN(a[i+63:i], b[i+63:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMINSQ" form="ymm, ymm, ymm" xed="VPMINSQ_YMMi64_MASKmskw_YMMi64_YMMi64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_min_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m128i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI64"/>
+ <parameter type="__m128i" varname="b" etype="SI64"/>
+ <description>Compare packed signed 64-bit integers in "a" and "b", and store packed minimum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := MIN(a[i+63:i], b[i+63:i])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMINSQ" form="xmm {k}, xmm, xmm" xed="VPMINSQ_XMMi64_MASKmskw_XMMi64_XMMi64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_min_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI64"/>
+ <parameter type="__m128i" varname="b" etype="SI64"/>
+ <description>Compare packed signed 64-bit integers in "a" and "b", and store packed minimum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := MIN(a[i+63:i], b[i+63:i])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMINSQ" form="xmm {z}, xmm, xmm" xed="VPMINSQ_XMMi64_MASKmskw_XMMi64_XMMi64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_min_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m128i" varname="a" etype="SI64"/>
+ <parameter type="__m128i" varname="b" etype="SI64"/>
+ <description>Compare packed signed 64-bit integers in "a" and "b", and store packed minimum values in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ dst[i+63:i] := MIN(a[i+63:i], b[i+63:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMINSQ" form="xmm, xmm, xmm" xed="VPMINSQ_XMMi64_MASKmskw_XMMi64_XMMi64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_min_epu32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="__m256i" varname="b" etype="UI32"/>
+ <description>Compare packed unsigned 32-bit integers in "a" and "b", and store packed minimum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := MIN(a[i+31:i], b[i+31:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMINUD" form="ymm {k}, ymm, ymm" xed="VPMINUD_YMMu32_MASKmskw_YMMu32_YMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_min_epu32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="__m256i" varname="b" etype="UI32"/>
+ <description>Compare packed unsigned 32-bit integers in "a" and "b", and store packed minimum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := MIN(a[i+31:i], b[i+31:i])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMINUD" form="ymm {z}, ymm, ymm" xed="VPMINUD_YMMu32_MASKmskw_YMMu32_YMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_min_epu32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="b" etype="UI32"/>
+ <description>Compare packed unsigned 32-bit integers in "a" and "b", and store packed minimum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := MIN(a[i+31:i], b[i+31:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMINUD" form="xmm {k}, xmm, xmm" xed="VPMINUD_XMMu32_MASKmskw_XMMu32_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_min_epu32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="b" etype="UI32"/>
+ <description>Compare packed unsigned 32-bit integers in "a" and "b", and store packed minimum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := MIN(a[i+31:i], b[i+31:i])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMINUD" form="xmm {z}, xmm, xmm" xed="VPMINUD_XMMu32_MASKmskw_XMMu32_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_min_epu64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m256i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="__m256i" varname="b" etype="UI64"/>
+ <description>Compare packed unsigned 64-bit integers in "a" and "b", and store packed minimum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := MIN(a[i+63:i], b[i+63:i])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMINUQ" form="ymm {k}, ymm, ymm" xed="VPMINUQ_YMMu64_MASKmskw_YMMu64_YMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_min_epu64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="__m256i" varname="b" etype="UI64"/>
+ <description>Compare packed unsigned 64-bit integers in "a" and "b", and store packed minimum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := MIN(a[i+63:i], b[i+63:i])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMINUQ" form="ymm {z}, ymm, ymm" xed="VPMINUQ_YMMu64_MASKmskw_YMMu64_YMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_min_epu64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="__m256i" varname="b" etype="UI64"/>
+ <description>Compare packed unsigned 64-bit integers in "a" and "b", and store packed minimum values in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ dst[i+63:i] := MIN(a[i+63:i], b[i+63:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMINUQ" form="ymm, ymm, ymm" xed="VPMINUQ_YMMu64_MASKmskw_YMMu64_YMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_min_epu64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m128i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <parameter type="__m128i" varname="b" etype="UI64"/>
+ <description>Compare packed unsigned 64-bit integers in "a" and "b", and store packed minimum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := MIN(a[i+63:i], b[i+63:i])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMINUQ" form="xmm {k}, xmm, xmm" xed="VPMINUQ_XMMu64_MASKmskw_XMMu64_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_min_epu64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <parameter type="__m128i" varname="b" etype="UI64"/>
+ <description>Compare packed unsigned 64-bit integers in "a" and "b", and store packed minimum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := MIN(a[i+63:i], b[i+63:i])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMINUQ" form="xmm {z}, xmm, xmm" xed="VPMINUQ_XMMu64_MASKmskw_XMMu64_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_min_epu64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <parameter type="__m128i" varname="b" etype="UI64"/>
+ <description>Compare packed unsigned 64-bit integers in "a" and "b", and store packed minimum values in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ dst[i+63:i] := MIN(a[i+63:i], b[i+63:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMINUQ" form="xmm, xmm, xmm" xed="VPMINUQ_XMMu64_MASKmskw_XMMu64_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_cvtepi32_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <description>Convert packed 32-bit integers in "a" to packed 8-bit integers with truncation, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := 32*j
+ k := 8*j
+ dst[k+7:k] := Truncate8(a[i+31:i])
+ENDFOR
+dst[MAX:64] := 0
+ </operation>
+ <instruction name="VPMOVDB" form="xmm, ymm" xed="VPMOVDB_XMMu8_MASKmskw_YMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cvtepi32_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__m128i" varname="src" etype="UI8"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <description>Convert packed 32-bit integers in "a" to packed 8-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := 32*j
+ l := 8*j
+ IF k[j]
+ dst[l+7:l] := Truncate8(a[i+31:i])
+ ELSE
+ dst[l+7:l] := src[l+7:l]
+ FI
+ENDFOR
+dst[MAX:64] := 0
+ </operation>
+ <instruction name="VPMOVDB" form="xmm {k}, ymm" xed="VPMOVDB_XMMu8_MASKmskw_YMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cvtepi32_storeu_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="base_addr" etype="UI8" memwidth="64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <description>Convert packed 32-bit integers in "a" to packed 8-bit integers with truncation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr".</description>
+ <operation>
+FOR j := 0 to 7
+ i := 32*j
+ l := 8*j
+ IF k[j]
+ MEM[base_addr+l+7:base_addr+l] := Truncate8(a[i+31:i])
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VPMOVDB" form="m64 {k}, ymm" xed="VPMOVDB_MEMu8_MASKmskw_YMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_cvtepi32_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <description>Convert packed 32-bit integers in "a" to packed 8-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := 32*j
+ l := 8*j
+ IF k[j]
+ dst[l+7:l] := Truncate8(a[i+31:i])
+ ELSE
+ dst[l+7:l] := 0
+ FI
+ENDFOR
+dst[MAX:64] := 0
+ </operation>
+ <instruction name="VPMOVDB" form="xmm {z}, ymm" xed="VPMOVDB_XMMu8_MASKmskw_YMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cvtepi32_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <description>Convert packed 32-bit integers in "a" to packed 8-bit integers with truncation, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := 32*j
+ k := 8*j
+ dst[k+7:k] := Truncate8(a[i+31:i])
+ENDFOR
+dst[MAX:32] := 0
+ </operation>
+ <instruction name="VPMOVDB" form="xmm, xmm" xed="VPMOVDB_XMMu8_MASKmskw_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cvtepi32_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__m128i" varname="src" etype="UI8"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <description>Convert packed 32-bit integers in "a" to packed 8-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := 32*j
+ l := 8*j
+ IF k[j]
+ dst[l+7:l] := Truncate8(a[i+31:i])
+ ELSE
+ dst[l+7:l] := src[l+7:l]
+ FI
+ENDFOR
+dst[MAX:32] := 0
+ </operation>
+ <instruction name="VPMOVDB" form="xmm {k}, xmm" xed="VPMOVDB_XMMu8_MASKmskw_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cvtepi32_storeu_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="base_addr" etype="UI8" memwidth="32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <description>Convert packed 32-bit integers in "a" to packed 8-bit integers with truncation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr".</description>
+ <operation>
+FOR j := 0 to 3
+ i := 32*j
+ l := 8*j
+ IF k[j]
+ MEM[base_addr+l+7:base_addr+l] := Truncate8(a[i+31:i])
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VPMOVDB" form="m64 {k}, xmm" xed="VPMOVDB_MEMu8_MASKmskw_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_cvtepi32_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <description>Convert packed 32-bit integers in "a" to packed 8-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := 32*j
+ l := 8*j
+ IF k[j]
+ dst[l+7:l] := Truncate8(a[i+31:i])
+ ELSE
+ dst[l+7:l] := 0
+ FI
+ENDFOR
+dst[MAX:32] := 0
+ </operation>
+ <instruction name="VPMOVDB" form="xmm {z}, xmm" xed="VPMOVDB_XMMu8_MASKmskw_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_cvtepi32_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <description>Convert packed 32-bit integers in "a" to packed 16-bit integers with truncation, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := 32*j
+ k := 16*j
+ dst[k+15:k] := Truncate16(a[i+31:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMOVDW" form="xmm, ymm" xed="VPMOVDW_XMMu16_MASKmskw_YMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cvtepi32_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__m128i" varname="src" etype="UI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <description>Convert packed 32-bit integers in "a" to packed 16-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := 32*j
+ l := 16*j
+ IF k[j]
+ dst[l+15:l] := Truncate16(a[i+31:i])
+ ELSE
+ dst[l+15:l] := src[l+15:l]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMOVDW" form="xmm {k}, ymm" xed="VPMOVDW_XMMu16_MASKmskw_YMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cvtepi32_storeu_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="base_addr" etype="UI16" memwidth="128"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <description>Convert packed 32-bit integers in "a" to packed 16-bit integers with truncation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr".</description>
+ <operation>
+FOR j := 0 to 7
+ i := 32*j
+ l := 16*j
+ IF k[j]
+ MEM[base_addr+l+15:base_addr+l] := Truncate16(a[i+31:i])
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VPMOVDW" form="m128 {k}, ymm" xed="VPMOVDW_MEMu16_MASKmskw_YMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_cvtepi32_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <description>Convert packed 32-bit integers in "a" to packed 16-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := 32*j
+ l := 16*j
+ IF k[j]
+ dst[l+15:l] := Truncate16(a[i+31:i])
+ ELSE
+ dst[l+15:l] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMOVDW" form="xmm {z}, ymm" xed="VPMOVDW_XMMu16_MASKmskw_YMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cvtepi32_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <description>Convert packed 32-bit integers in "a" to packed 16-bit integers with truncation, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := 32*j
+ k := 16*j
+ dst[k+15:k] := Truncate16(a[i+31:i])
+ENDFOR
+dst[MAX:64] := 0
+ </operation>
+ <instruction name="VPMOVDW" form="xmm, xmm" xed="VPMOVDW_XMMu16_MASKmskw_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cvtepi32_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__m128i" varname="src" etype="UI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <description>Convert packed 32-bit integers in "a" to packed 16-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := 32*j
+ l := 16*j
+ IF k[j]
+ dst[l+15:l] := Truncate16(a[i+31:i])
+ ELSE
+ dst[l+15:l] := src[l+15:l]
+ FI
+ENDFOR
+dst[MAX:64] := 0
+ </operation>
+ <instruction name="VPMOVDW" form="xmm {k}, xmm" xed="VPMOVDW_XMMu16_MASKmskw_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cvtepi32_storeu_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="base_addr" etype="UI16" memwidth="64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <description>Convert packed 32-bit integers in "a" to packed 16-bit integers with truncation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr".</description>
+ <operation>
+FOR j := 0 to 3
+ i := 32*j
+ l := 16*j
+ IF k[j]
+ MEM[base_addr+l+15:base_addr+l] := Truncate16(a[i+31:i])
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VPMOVDW" form="m64 {k}, xmm" xed="VPMOVDW_MEMu16_MASKmskw_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_cvtepi32_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <description>Convert packed 32-bit integers in "a" to packed 16-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := 32*j
+ l := 16*j
+ IF k[j]
+ dst[l+15:l] := Truncate16(a[i+31:i])
+ ELSE
+ dst[l+15:l] := 0
+ FI
+ENDFOR
+dst[MAX:64] := 0
+ </operation>
+ <instruction name="VPMOVDW" form="xmm {z}, xmm" xed="VPMOVDW_XMMu16_MASKmskw_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_cvtepi64_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <description>Convert packed 64-bit integers in "a" to packed 8-bit integers with truncation, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := 64*j
+ k := 8*j
+ dst[k+7:k] := Truncate8(a[i+63:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMOVQB" form="xmm, ymm" xed="VPMOVQB_XMMu8_MASKmskw_YMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cvtepi64_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__m128i" varname="src" etype="UI8"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <description>Convert packed 64-bit integers in "a" to packed 8-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := 64*j
+ l := 8*j
+ IF k[j]
+ dst[l+7:l] := Truncate8(a[i+63:i])
+ ELSE
+ dst[l+7:l] := src[l+7:l]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMOVQB" form="xmm {k}, ymm" xed="VPMOVQB_XMMu8_MASKmskw_YMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cvtepi64_storeu_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="base_addr" etype="UI8" memwidth="32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <description>Convert packed 64-bit integers in "a" to packed 8-bit integers with truncation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr".</description>
+ <operation>
+FOR j := 0 to 3
+ i := 64*j
+ l := 8*j
+ IF k[j]
+ MEM[base_addr+l+7:base_addr+l] := Truncate8(a[i+63:i])
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VPMOVQB" form="m32 {k}, ymm" xed="VPMOVQB_MEMu8_MASKmskw_YMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_cvtepi64_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <description>Convert packed 64-bit integers in "a" to packed 8-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := 64*j
+ l := 8*j
+ IF k[j]
+ dst[l+7:l] := Truncate8(a[i+63:i])
+ ELSE
+ dst[l+7:l] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMOVQB" form="xmm {z}, ymm" xed="VPMOVQB_XMMu8_MASKmskw_YMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cvtepi64_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <description>Convert packed 64-bit integers in "a" to packed 8-bit integers with truncation, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := 64*j
+ k := 8*j
+ dst[k+7:k] := Truncate8(a[i+63:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMOVQB" form="xmm, xmm" xed="VPMOVQB_XMMu8_MASKmskw_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cvtepi64_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__m128i" varname="src" etype="UI8"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <description>Convert packed 64-bit integers in "a" to packed 8-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := 64*j
+ l := 8*j
+ IF k[j]
+ dst[l+7:l] := Truncate8(a[i+63:i])
+ ELSE
+ dst[l+7:l] := src[l+7:l]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMOVQB" form="xmm {k}, xmm" xed="VPMOVQB_XMMu8_MASKmskw_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cvtepi64_storeu_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="base_addr" etype="UI8" memwidth="16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <description>Convert packed 64-bit integers in "a" to packed 8-bit integers with truncation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr".</description>
+ <operation>
+FOR j := 0 to 1
+ i := 64*j
+ l := 8*j
+ IF k[j]
+ MEM[base_addr+l+7:base_addr+l] := Truncate8(a[i+63:i])
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VPMOVQB" form="m16 {k}, xmm" xed="VPMOVQB_MEMu8_MASKmskw_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_cvtepi64_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <description>Convert packed 64-bit integers in "a" to packed 8-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := 64*j
+ l := 8*j
+ IF k[j]
+ dst[l+7:l] := Truncate8(a[i+63:i])
+ ELSE
+ dst[l+7:l] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMOVQB" form="xmm {z}, xmm" xed="VPMOVQB_XMMu8_MASKmskw_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_cvtepi64_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <description>Convert packed 64-bit integers in "a" to packed 32-bit integers with truncation, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := 64*j
+ k := 32*j
+ dst[k+31:k] := Truncate32(a[i+63:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMOVQD" form="xmm, ymm" xed="VPMOVQD_XMMu32_MASKmskw_YMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cvtepi64_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <description>Convert packed 64-bit integers in "a" to packed 32-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := 64*j
+ l := 32*j
+ IF k[j]
+ dst[l+31:l] := Truncate32(a[i+63:i])
+ ELSE
+ dst[l+31:l] := src[l+31:l]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMOVQD" form="xmm {k}, ymm" xed="VPMOVQD_XMMu32_MASKmskw_YMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cvtepi64_storeu_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="base_addr" etype="UI32" memwidth="128"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <description>Convert packed 64-bit integers in "a" to packed 32-bit integers with truncation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr".</description>
+ <operation>
+FOR j := 0 to 3
+ i := 64*j
+ l := 32*j
+ IF k[j]
+ MEM[base_addr+l+31:base_addr+l] := Truncate32(a[i+63:i])
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VPMOVQD" form="m128 {k}, ymm" xed="VPMOVQD_MEMu32_MASKmskw_YMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_cvtepi64_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <description>Convert packed 64-bit integers in "a" to packed 32-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := 64*j
+ l := 32*j
+ IF k[j]
+ dst[l+31:l] := Truncate32(a[i+63:i])
+ ELSE
+ dst[l+31:l] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMOVQD" form="xmm {z}, ymm" xed="VPMOVQD_XMMu32_MASKmskw_YMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cvtepi64_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <description>Convert packed 64-bit integers in "a" to packed 32-bit integers with truncation, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := 64*j
+ k := 32*j
+ dst[k+31:k] := Truncate32(a[i+63:i])
+ENDFOR
+dst[MAX:64] := 0
+ </operation>
+ <instruction name="VPMOVQD" form="xmm, xmm" xed="VPMOVQD_XMMu32_MASKmskw_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cvtepi64_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <description>Convert packed 64-bit integers in "a" to packed 32-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := 64*j
+ l := 32*j
+ IF k[j]
+ dst[l+31:l] := Truncate32(a[i+63:i])
+ ELSE
+ dst[l+31:l] := src[l+31:l]
+ FI
+ENDFOR
+dst[MAX:64] := 0
+ </operation>
+ <instruction name="VPMOVQD" form="xmm {k}, xmm" xed="VPMOVQD_XMMu32_MASKmskw_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cvtepi64_storeu_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="base_addr" etype="UI32" memwidth="64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <description>Convert packed 64-bit integers in "a" to packed 32-bit integers with truncation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr".</description>
+ <operation>
+FOR j := 0 to 1
+ i := 64*j
+ l := 32*j
+ IF k[j]
+ MEM[base_addr+l+31:base_addr+l] := Truncate32(a[i+63:i])
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VPMOVQD" form="m64 {k}, xmm" xed="VPMOVQD_MEMu32_MASKmskw_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_cvtepi64_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <description>Convert packed 64-bit integers in "a" to packed 32-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := 64*j
+ l := 32*j
+ IF k[j]
+ dst[l+31:l] := Truncate32(a[i+63:i])
+ ELSE
+ dst[l+31:l] := 0
+ FI
+ENDFOR
+dst[MAX:64] := 0
+ </operation>
+ <instruction name="VPMOVQD" form="xmm {z}, xmm" xed="VPMOVQD_XMMu32_MASKmskw_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_cvtepi64_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <description>Convert packed 64-bit integers in "a" to packed 16-bit integers with truncation, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := 64*j
+ k := 16*j
+ dst[k+15:k] := Truncate16(a[i+63:i])
+ENDFOR
+dst[MAX:64] := 0
+ </operation>
+ <instruction name="VPMOVQW" form="xmm, ymm" xed="VPMOVQW_XMMu16_MASKmskw_YMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cvtepi64_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__m128i" varname="src" etype="UI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <description>Convert packed 64-bit integers in "a" to packed 16-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := 64*j
+ l := 16*j
+ IF k[j]
+ dst[l+15:l] := Truncate16(a[i+63:i])
+ ELSE
+ dst[l+15:l] := src[l+15:l]
+ FI
+ENDFOR
+dst[MAX:64] := 0
+ </operation>
+ <instruction name="VPMOVQW" form="xmm {k}, ymm" xed="VPMOVQW_XMMu16_MASKmskw_YMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cvtepi64_storeu_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="base_addr" etype="UI16" memwidth="64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <description>Convert packed 64-bit integers in "a" to packed 16-bit integers with truncation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr".</description>
+ <operation>
+FOR j := 0 to 3
+ i := 64*j
+ l := 16*j
+ IF k[j]
+ MEM[base_addr+l+15:base_addr+l] := Truncate16(a[i+63:i])
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VPMOVQW" form="m64 {k}, ymm" xed="VPMOVQW_MEMu16_MASKmskw_YMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_cvtepi64_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <description>Convert packed 64-bit integers in "a" to packed 16-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := 64*j
+ l := 16*j
+ IF k[j]
+ dst[l+15:l] := Truncate16(a[i+63:i])
+ ELSE
+ dst[l+15:l] := 0
+ FI
+ENDFOR
+dst[MAX:64] := 0
+ </operation>
+ <instruction name="VPMOVQW" form="xmm {z}, ymm" xed="VPMOVQW_XMMu16_MASKmskw_YMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cvtepi64_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <description>Convert packed 64-bit integers in "a" to packed 16-bit integers with truncation, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := 64*j
+ k := 16*j
+ dst[k+15:k] := Truncate16(a[i+63:i])
+ENDFOR
+dst[MAX:32] := 0
+ </operation>
+ <instruction name="VPMOVQW" form="xmm, xmm" xed="VPMOVQW_XMMu16_MASKmskw_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cvtepi64_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__m128i" varname="src" etype="UI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <description>Convert packed 64-bit integers in "a" to packed 16-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := 64*j
+ l := 16*j
+ IF k[j]
+ dst[l+15:l] := Truncate16(a[i+63:i])
+ ELSE
+ dst[l+15:l] := src[l+15:l]
+ FI
+ENDFOR
+dst[MAX:32] := 0
+ </operation>
+ <instruction name="VPMOVQW" form="xmm {k}, xmm" xed="VPMOVQW_XMMu16_MASKmskw_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cvtepi64_storeu_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="base_addr" etype="UI16" memwidth="32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <description>Convert packed 64-bit integers in "a" to packed 16-bit integers with truncation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr".</description>
+ <operation>
+FOR j := 0 to 1
+ i := 64*j
+ l := 16*j
+ IF k[j]
+ MEM[base_addr+l+15:base_addr+l] := Truncate16(a[i+63:i])
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VPMOVQW" form="m32 {k}, xmm" xed="VPMOVQW_MEMu16_MASKmskw_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_cvtepi64_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <description>Convert packed 64-bit integers in "a" to packed 16-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := 64*j
+ l := 16*j
+ IF k[j]
+ dst[l+15:l] := Truncate16(a[i+63:i])
+ ELSE
+ dst[l+15:l] := 0
+ FI
+ENDFOR
+dst[MAX:32] := 0
+ </operation>
+ <instruction name="VPMOVQW" form="xmm {z}, xmm" xed="VPMOVQW_XMMu16_MASKmskw_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_cvtsepi32_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="SI8"/>
+ <parameter type="__m256i" varname="a" etype="SI32"/>
+ <description>Convert packed signed 32-bit integers in "a" to packed 8-bit integers with signed saturation, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := 32*j
+ k := 8*j
+ dst[k+7:k] := Saturate8(a[i+31:i])
+ENDFOR
+dst[MAX:64] := 0
+ </operation>
+ <instruction name="VPMOVSDB" form="xmm, ymm" xed="VPMOVSDB_XMMi8_MASKmskw_YMMi32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cvtsepi32_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="SI8"/>
+ <parameter type="__m128i" varname="src" etype="UI8"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI32"/>
+ <description>Convert packed signed 32-bit integers in "a" to packed 8-bit integers with signed saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := 32*j
+ l := 8*j
+ IF k[j]
+ dst[l+7:l] := Saturate8(a[i+31:i])
+ ELSE
+ dst[l+7:l] := src[l+7:l]
+ FI
+ENDFOR
+dst[MAX:64] := 0
+ </operation>
+ <instruction name="VPMOVSDB" form="xmm {k}, ymm" xed="VPMOVSDB_XMMi8_MASKmskw_YMMi32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cvtsepi32_storeu_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="base_addr" etype="SI8" memwidth="64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI32"/>
+ <description>Convert packed signed 32-bit integers in "a" to packed 8-bit integers with signed saturation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr".</description>
+ <operation>
+FOR j := 0 to 7
+ i := 32*j
+ l := 8*j
+ IF k[j]
+ MEM[base_addr+l+7:base_addr+l] := Saturate8(a[i+31:i])
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VPMOVSDB" form="m64 {k}, ymm" xed="VPMOVSDB_MEMi8_MASKmskw_YMMi32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_cvtsepi32_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="SI8"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI32"/>
+ <description>Convert packed signed 32-bit integers in "a" to packed 8-bit integers with signed saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := 32*j
+ l := 8*j
+ IF k[j]
+ dst[l+7:l] := Saturate8(a[i+31:i])
+ ELSE
+ dst[l+7:l] := 0
+ FI
+ENDFOR
+dst[MAX:64] := 0
+ </operation>
+ <instruction name="VPMOVSDB" form="xmm {z}, ymm" xed="VPMOVSDB_XMMi8_MASKmskw_YMMi32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cvtsepi32_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="SI8"/>
+ <parameter type="__m128i" varname="a" etype="SI32"/>
+ <description>Convert packed signed 32-bit integers in "a" to packed 8-bit integers with signed saturation, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := 32*j
+ k := 8*j
+ dst[k+7:k] := Saturate8(a[i+31:i])
+ENDFOR
+dst[MAX:32] := 0
+ </operation>
+ <instruction name="VPMOVSDB" form="xmm, xmm" xed="VPMOVSDB_XMMi8_MASKmskw_XMMi32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cvtsepi32_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="SI8"/>
+ <parameter type="__m128i" varname="src" etype="UI8"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI32"/>
+ <description>Convert packed signed 32-bit integers in "a" to packed 8-bit integers with signed saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := 32*j
+ l := 8*j
+ IF k[j]
+ dst[l+7:l] := Saturate8(a[i+31:i])
+ ELSE
+ dst[l+7:l] := src[l+7:l]
+ FI
+ENDFOR
+dst[MAX:32] := 0
+ </operation>
+ <instruction name="VPMOVSDB" form="xmm {k}, xmm" xed="VPMOVSDB_XMMi8_MASKmskw_XMMi32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cvtsepi32_storeu_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="base_addr" etype="SI8" memwidth="32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI32"/>
+ <description>Convert packed signed 32-bit integers in "a" to packed 8-bit integers with signed saturation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr".</description>
+ <operation>
+FOR j := 0 to 3
+ i := 32*j
+ l := 8*j
+ IF k[j]
+ MEM[base_addr+l+7:base_addr+l] := Saturate8(a[i+31:i])
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VPMOVSDB" form="m32 {k}, xmm" xed="VPMOVSDB_MEMi8_MASKmskw_XMMi32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_cvtsepi32_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="SI8"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI32"/>
+ <description>Convert packed signed 32-bit integers in "a" to packed 8-bit integers with signed saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := 32*j
+ l := 8*j
+ IF k[j]
+ dst[l+7:l] := Saturate8(a[i+31:i])
+ ELSE
+ dst[l+7:l] := 0
+ FI
+ENDFOR
+dst[MAX:32] := 0
+ </operation>
+ <instruction name="VPMOVSDB" form="xmm {z}, xmm" xed="VPMOVSDB_XMMi8_MASKmskw_XMMi32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_cvtsepi32_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="SI16"/>
+ <parameter type="__m256i" varname="a" etype="SI32"/>
+ <description>Convert packed signed 32-bit integers in "a" to packed 16-bit integers with signed saturation, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := 32*j
+ k := 16*j
+ dst[k+15:k] := Saturate16(a[i+31:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMOVSDW" form="xmm, ymm" xed="VPMOVSDW_XMMi16_MASKmskw_YMMi32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cvtsepi32_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="SI16"/>
+ <parameter type="__m128i" varname="src" etype="UI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI32"/>
+ <description>Convert packed signed 32-bit integers in "a" to packed 16-bit integers with signed saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := 32*j
+ l := 16*j
+ IF k[j]
+ dst[l+15:l] := Saturate16(a[i+31:i])
+ ELSE
+ dst[l+15:l] := src[l+15:l]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMOVSDW" form="xmm {k}, ymm" xed="VPMOVSDW_XMMi16_MASKmskw_YMMi32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cvtsepi32_storeu_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="base_addr" etype="SI16" memwidth="128"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI32"/>
+ <description>Convert packed signed 32-bit integers in "a" to packed 16-bit integers with signed saturation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr".</description>
+ <operation>
+FOR j := 0 to 7
+ i := 32*j
+ l := 16*j
+ IF k[j]
+ MEM[base_addr+l+15:base_addr+l] := Saturate16(a[i+31:i])
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VPMOVSDW" form="m128 {k}, ymm" xed="VPMOVSDW_MEMi16_MASKmskw_YMMi32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_cvtsepi32_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="SI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI32"/>
+ <description>Convert packed signed 32-bit integers in "a" to packed 16-bit integers with signed saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := 32*j
+ l := 16*j
+ IF k[j]
+ dst[l+15:l] := Saturate16(a[i+31:i])
+ ELSE
+ dst[l+15:l] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMOVSDW" form="xmm {z}, ymm" xed="VPMOVSDW_XMMi16_MASKmskw_YMMi32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cvtsepi32_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="SI16"/>
+ <parameter type="__m128i" varname="a" etype="SI32"/>
+ <description>Convert packed signed 32-bit integers in "a" to packed 16-bit integers with signed saturation, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := 32*j
+ k := 16*j
+ dst[k+15:k] := Saturate16(a[i+31:i])
+ENDFOR
+dst[MAX:64] := 0
+ </operation>
+ <instruction name="VPMOVSDW" form="xmm, xmm" xed="VPMOVSDW_XMMi16_MASKmskw_XMMi32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cvtsepi32_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="SI16"/>
+ <parameter type="__m128i" varname="src" etype="UI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI32"/>
+ <description>Convert packed signed 32-bit integers in "a" to packed 16-bit integers with signed saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := 32*j
+ l := 16*j
+ IF k[j]
+ dst[l+15:l] := Saturate16(a[i+31:i])
+ ELSE
+ dst[l+15:l] := src[l+15:l]
+ FI
+ENDFOR
+dst[MAX:64] := 0
+ </operation>
+ <instruction name="VPMOVSDW" form="xmm {k}, xmm" xed="VPMOVSDW_XMMi16_MASKmskw_XMMi32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cvtsepi32_storeu_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="base_addr" etype="SI16" memwidth="64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI32"/>
+ <description>Convert packed signed 32-bit integers in "a" to packed 16-bit integers with signed saturation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr".</description>
+ <operation>
+FOR j := 0 to 3
+ i := 32*j
+ l := 16*j
+ IF k[j]
+ MEM[base_addr+l+15:base_addr+l] := Saturate16(a[i+31:i])
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VPMOVSDW" form="m64 {k}, xmm" xed="VPMOVSDW_MEMi16_MASKmskw_XMMi32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_cvtsepi32_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="SI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI32"/>
+ <description>Convert packed signed 32-bit integers in "a" to packed 16-bit integers with signed saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := 32*j
+ l := 16*j
+ IF k[j]
+ dst[l+15:l] := Saturate16(a[i+31:i])
+ ELSE
+ dst[l+15:l] := 0
+ FI
+ENDFOR
+dst[MAX:64] := 0
+ </operation>
+ <instruction name="VPMOVSDW" form="xmm {z}, xmm" xed="VPMOVSDW_XMMi16_MASKmskw_XMMi32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_cvtsepi64_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="SI8"/>
+ <parameter type="__m256i" varname="a" etype="SI64"/>
+ <description>Convert packed signed 64-bit integers in "a" to packed 8-bit integers with signed saturation, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := 64*j
+ k := 8*j
+ dst[k+7:k] := Saturate8(a[i+63:i])
+ENDFOR
+dst[MAX:32] := 0
+ </operation>
+ <instruction name="VPMOVSQB" form="xmm, ymm" xed="VPMOVSQB_XMMi8_MASKmskw_YMMi64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cvtsepi64_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="SI8"/>
+ <parameter type="__m128i" varname="src" etype="UI8"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI64"/>
+ <description>Convert packed signed 64-bit integers in "a" to packed 8-bit integers with signed saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := 64*j
+ l := 8*j
+ IF k[j]
+ dst[l+7:l] := Saturate8(a[i+63:i])
+ ELSE
+ dst[l+7:l] := src[l+7:l]
+ FI
+ENDFOR
+dst[MAX:32] := 0
+ </operation>
+ <instruction name="VPMOVSQB" form="xmm {k}, ymm" xed="VPMOVSQB_XMMi8_MASKmskw_YMMi64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cvtsepi64_storeu_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="base_addr" etype="SI8" memwidth="32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI64"/>
+ <description>Convert packed signed 64-bit integers in "a" to packed 8-bit integers with signed saturation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr".</description>
+ <operation>
+FOR j := 0 to 3
+ i := 64*j
+ l := 8*j
+ IF k[j]
+ MEM[base_addr+l+7:base_addr+l] := Saturate8(a[i+63:i])
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VPMOVSQB" form="m32 {k}, ymm" xed="VPMOVSQB_MEMi8_MASKmskw_YMMi64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_cvtsepi64_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="SI8"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI64"/>
+ <description>Convert packed signed 64-bit integers in "a" to packed 8-bit integers with signed saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := 64*j
+ l := 8*j
+ IF k[j]
+ dst[l+7:l] := Saturate8(a[i+63:i])
+ ELSE
+ dst[l+7:l] := 0
+ FI
+ENDFOR
+dst[MAX:32] := 0
+ </operation>
+ <instruction name="VPMOVSQB" form="xmm {z}, ymm" xed="VPMOVSQB_XMMi8_MASKmskw_YMMi64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cvtsepi64_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="SI8"/>
+ <parameter type="__m128i" varname="a" etype="SI64"/>
+ <description>Convert packed signed 64-bit integers in "a" to packed 8-bit integers with signed saturation, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := 64*j
+ k := 8*j
+ dst[k+7:k] := Saturate8(a[i+63:i])
+ENDFOR
+dst[MAX:16] := 0
+ </operation>
+ <instruction name="VPMOVSQB" form="xmm, xmm" xed="VPMOVSQB_XMMi8_MASKmskw_XMMi64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cvtsepi64_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="SI8"/>
+ <parameter type="__m128i" varname="src" etype="UI8"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI64"/>
+ <description>Convert packed signed 64-bit integers in "a" to packed 8-bit integers with signed saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := 64*j
+ l := 8*j
+ IF k[j]
+ dst[l+7:l] := Saturate8(a[i+63:i])
+ ELSE
+ dst[l+7:l] := src[l+7:l]
+ FI
+ENDFOR
+dst[MAX:16] := 0
+ </operation>
+ <instruction name="VPMOVSQB" form="xmm {k}, xmm" xed="VPMOVSQB_XMMi8_MASKmskw_XMMi64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cvtsepi64_storeu_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="base_addr" etype="SI8" memwidth="16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI64"/>
+ <description>Convert packed signed 64-bit integers in "a" to packed 8-bit integers with signed saturation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr".</description>
+ <operation>
+FOR j := 0 to 1
+ i := 64*j
+ l := 8*j
+ IF k[j]
+ MEM[base_addr+l+7:base_addr+l] := Saturate8(a[i+63:i])
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VPMOVSQB" form="m16 {k}, xmm" xed="VPMOVSQB_MEMi8_MASKmskw_XMMi64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_cvtsepi64_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="SI8"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI64"/>
+ <description>Convert packed signed 64-bit integers in "a" to packed 8-bit integers with signed saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := 64*j
+ l := 8*j
+ IF k[j]
+ dst[l+7:l] := Saturate8(a[i+63:i])
+ ELSE
+ dst[l+7:l] := 0
+ FI
+ENDFOR
+dst[MAX:16] := 0
+ </operation>
+ <instruction name="VPMOVSQB" form="xmm {z}, xmm" xed="VPMOVSQB_XMMi8_MASKmskw_XMMi64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_cvtsepi64_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="SI32"/>
+ <parameter type="__m256i" varname="a" etype="SI64"/>
+ <description>Convert packed signed 64-bit integers in "a" to packed 32-bit integers with signed saturation, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := 64*j
+ k := 32*j
+ dst[k+31:k] := Saturate32(a[i+63:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMOVSQD" form="xmm, ymm" xed="VPMOVSQD_XMMi32_MASKmskw_YMMi64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cvtsepi64_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="SI32"/>
+ <parameter type="__m128i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI64"/>
+ <description>Convert packed signed 64-bit integers in "a" to packed 32-bit integers with signed saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := 64*j
+ l := 32*j
+ IF k[j]
+ dst[l+31:l] := Saturate32(a[i+63:i])
+ ELSE
+ dst[l+31:l] := src[l+31:l]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMOVSQD" form="xmm {k}, ymm" xed="VPMOVSQD_XMMi32_MASKmskw_YMMi64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cvtsepi64_storeu_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="base_addr" etype="SI32" memwidth="128"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI64"/>
+ <description>Convert packed signed 64-bit integers in "a" to packed 32-bit integers with signed saturation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr".</description>
+ <operation>
+FOR j := 0 to 3
+ i := 64*j
+ l := 32*j
+ IF k[j]
+ MEM[base_addr+l+31:base_addr+l] := Saturate32(a[i+63:i])
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VPMOVSQD" form="m128 {k}, ymm" xed="VPMOVSQD_MEMi32_MASKmskw_YMMi64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_cvtsepi64_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="SI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI64"/>
+ <description>Convert packed signed 64-bit integers in "a" to packed 32-bit integers with signed saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := 64*j
+ l := 32*j
+ IF k[j]
+ dst[l+31:l] := Saturate32(a[i+63:i])
+ ELSE
+ dst[l+31:l] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMOVSQD" form="xmm {z}, ymm" xed="VPMOVSQD_XMMi32_MASKmskw_YMMi64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cvtsepi64_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="SI32"/>
+ <parameter type="__m128i" varname="a" etype="SI64"/>
+ <description>Convert packed signed 64-bit integers in "a" to packed 32-bit integers with signed saturation, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := 64*j
+ k := 32*j
+ dst[k+31:k] := Saturate32(a[i+63:i])
+ENDFOR
+dst[MAX:64] := 0
+ </operation>
+ <instruction name="VPMOVSQD" form="xmm, xmm" xed="VPMOVSQD_XMMi32_MASKmskw_XMMi64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cvtsepi64_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="SI32"/>
+ <parameter type="__m128i" varname="src" etype="SI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI64"/>
+ <description>Convert packed signed 64-bit integers in "a" to packed 32-bit integers with signed saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := 64*j
+ l := 32*j
+ IF k[j]
+ dst[l+31:l] := Saturate32(a[i+63:i])
+ ELSE
+ dst[l+31:l] := src[l+31:l]
+ FI
+ENDFOR
+dst[MAX:64] := 0
+ </operation>
+ <instruction name="VPMOVSQD" form="xmm {k}, xmm" xed="VPMOVSQD_XMMi32_MASKmskw_XMMi64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cvtsepi64_storeu_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="base_addr" etype="SI32" memwidth="64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI64"/>
+ <description>Convert packed signed 64-bit integers in "a" to packed 32-bit integers with signed saturation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr".</description>
+ <operation>
+FOR j := 0 to 1
+ i := 64*j
+ l := 32*j
+ IF k[j]
+ MEM[base_addr+l+31:base_addr+l] := Saturate32(a[i+63:i])
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VPMOVSQD" form="m64 {k}, xmm" xed="VPMOVSQD_MEMi32_MASKmskw_XMMi64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_cvtsepi64_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="SI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI64"/>
+ <description>Convert packed signed 64-bit integers in "a" to packed 32-bit integers with signed saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := 64*j
+ l := 32*j
+ IF k[j]
+ dst[l+31:l] := Saturate32(a[i+63:i])
+ ELSE
+ dst[l+31:l] := 0
+ FI
+ENDFOR
+dst[MAX:64] := 0
+ </operation>
+ <instruction name="VPMOVSQD" form="xmm {z}, xmm" xed="VPMOVSQD_XMMi32_MASKmskw_XMMi64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_cvtsepi64_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="SI16"/>
+ <parameter type="__m256i" varname="a" etype="SI64"/>
+ <description>Convert packed signed 64-bit integers in "a" to packed 16-bit integers with signed saturation, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := 64*j
+ k := 16*j
+ dst[k+15:k] := Saturate16(a[i+63:i])
+ENDFOR
+dst[MAX:64] := 0
+ </operation>
+ <instruction name="VPMOVSQW" form="xmm, ymm" xed="VPMOVSQW_XMMi16_MASKmskw_YMMi64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cvtsepi64_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="SI16"/>
+ <parameter type="__m128i" varname="src" etype="SI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI64"/>
+ <description>Convert packed signed 64-bit integers in "a" to packed 16-bit integers with signed saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := 64*j
+ l := 16*j
+ IF k[j]
+ dst[l+15:l] := Saturate16(a[i+63:i])
+ ELSE
+ dst[l+15:l] := src[l+15:l]
+ FI
+ENDFOR
+dst[MAX:64] := 0
+ </operation>
+ <instruction name="VPMOVSQW" form="xmm {k}, ymm" xed="VPMOVSQW_XMMi16_MASKmskw_YMMi64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cvtsepi64_storeu_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="base_addr" etype="SI16" memwidth="64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI64"/>
+ <description>Convert packed signed 64-bit integers in "a" to packed 16-bit integers with signed saturation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr".</description>
+ <operation>
+FOR j := 0 to 3
+ i := 64*j
+ l := 16*j
+ IF k[j]
+ MEM[base_addr+l+15:base_addr+l] := Saturate16(a[i+63:i])
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VPMOVSQW" form="m64 {k}, ymm" xed="VPMOVSQW_MEMi16_MASKmskw_YMMi64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_cvtsepi64_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="SI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI64"/>
+ <description>Convert packed signed 64-bit integers in "a" to packed 16-bit integers with signed saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := 64*j
+ l := 16*j
+ IF k[j]
+ dst[l+15:l] := Saturate16(a[i+63:i])
+ ELSE
+ dst[l+15:l] := 0
+ FI
+ENDFOR
+dst[MAX:64] := 0
+ </operation>
+ <instruction name="VPMOVSQW" form="xmm {z}, ymm" xed="VPMOVSQW_XMMi16_MASKmskw_YMMi64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cvtsepi64_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="SI16"/>
+ <parameter type="__m128i" varname="a" etype="SI64"/>
+ <description>Convert packed signed 64-bit integers in "a" to packed 16-bit integers with signed saturation, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := 64*j
+ k := 16*j
+ dst[k+15:k] := Saturate16(a[i+63:i])
+ENDFOR
+dst[MAX:32] := 0
+ </operation>
+ <instruction name="VPMOVSQW" form="xmm, xmm" xed="VPMOVSQW_XMMi16_MASKmskw_XMMi64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cvtsepi64_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="SI16"/>
+ <parameter type="__m128i" varname="src" etype="SI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI64"/>
+ <description>Convert packed signed 64-bit integers in "a" to packed 16-bit integers with signed saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := 64*j
+ l := 16*j
+ IF k[j]
+ dst[l+15:l] := Saturate16(a[i+63:i])
+ ELSE
+ dst[l+15:l] := src[l+15:l]
+ FI
+ENDFOR
+dst[MAX:32] := 0
+ </operation>
+ <instruction name="VPMOVSQW" form="xmm {k}, xmm" xed="VPMOVSQW_XMMi16_MASKmskw_XMMi64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cvtsepi64_storeu_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="base_addr" etype="SI16" memwidth="32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI64"/>
+ <description>Convert packed signed 64-bit integers in "a" to packed 16-bit integers with signed saturation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr".</description>
+ <operation>
+FOR j := 0 to 1
+ i := 64*j
+ l := 16*j
+ IF k[j]
+ MEM[base_addr+l+15:base_addr+l] := Saturate16(a[i+63:i])
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VPMOVSQW" form="m32 {k}, xmm" xed="VPMOVSQW_MEMi16_MASKmskw_XMMi64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_cvtsepi64_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="SI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI64"/>
+ <description>Convert packed signed 64-bit integers in "a" to packed 16-bit integers with signed saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := 64*j
+ l := 16*j
+ IF k[j]
+ dst[l+15:l] := Saturate16(a[i+63:i])
+ ELSE
+ dst[l+15:l] := 0
+ FI
+ENDFOR
+dst[MAX:32] := 0
+ </operation>
+ <instruction name="VPMOVSQW" form="xmm {z}, xmm" xed="VPMOVSQW_XMMi16_MASKmskw_XMMi64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cvtepi8_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m256i" varname="dst" etype="SI32"/>
+ <parameter type="__m256i" varname="src" etype="SI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI8"/>
+ <description>Sign extend packed 8-bit integers in the low 8 bytes of "a" to packed 32-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := 32*j
+ l := 8*j
+ IF k[j]
+ dst[i+31:i] := SignExtend32(a[l+7:l])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMOVSXBD" form="ymm {k}, xmm" xed="VPMOVSXBD_YMMi32_MASKmskw_XMMi8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_cvtepi8_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m256i" varname="dst" etype="SI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI8"/>
+ <description>Sign extend packed 8-bit integers in the low 8 bytes of "a" to packed 32-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := 32*j
+ l := 8*j
+ IF k[j]
+ dst[i+31:i] := SignExtend32(a[l+7:l])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMOVSXBD" form="ymm {z}, xmm" xed="VPMOVSXBD_YMMi32_MASKmskw_XMMi8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cvtepi8_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="SI32"/>
+ <parameter type="__m128i" varname="src" etype="SI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI8"/>
+ <description>Sign extend packed 8-bit integers in the low 4 bytes of "a" to packed 32-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := 32*j
+ l := 8*j
+ IF k[j]
+ dst[i+31:i] := SignExtend32(a[l+7:l])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMOVSXBD" form="xmm {k}, xmm" xed="VPMOVSXBD_XMMi32_MASKmskw_XMMi8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_cvtepi8_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="SI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI8"/>
+ <description>Sign extend packed 8-bit integers in the low 4 bytes of "a" to packed 32-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := 32*j
+ l := 8*j
+ IF k[j]
+ dst[i+31:i] := SignExtend32(a[l+7:l])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMOVSXBD" form="xmm {z}, xmm" xed="VPMOVSXBD_XMMi32_MASKmskw_XMMi8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cvtepi8_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m256i" varname="dst" etype="SI64"/>
+ <parameter type="__m256i" varname="src" etype="SI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI8"/>
+ <description>Sign extend packed 8-bit integers in the low 4 bytes of "a" to packed 64-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := 64*j
+ l := 8*j
+ IF k[j]
+ dst[i+63:i] := SignExtend64(a[l+7:l])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMOVSXBQ" form="ymm {k}, xmm" xed="VPMOVSXBQ_YMMi64_MASKmskw_XMMi8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_cvtepi8_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m256i" varname="dst" etype="SI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI8"/>
+ <description>Sign extend packed 8-bit integers in the low 4 bytes of "a" to packed 64-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := 64*j
+ l := 8*j
+ IF k[j]
+ dst[i+63:i] := SignExtend64(a[l+7:l])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMOVSXBQ" form="ymm {z}, xmm" xed="VPMOVSXBQ_YMMi64_MASKmskw_XMMi8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cvtepi8_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="SI64"/>
+ <parameter type="__m128i" varname="src" etype="SI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI8"/>
+ <description>Sign extend packed 8-bit integers in the low 2 bytes of "a" to packed 64-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := 64*j
+ l := 8*j
+ IF k[j]
+ dst[i+63:i] := SignExtend64(a[l+7:l])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMOVSXBQ" form="xmm {k}, xmm" xed="VPMOVSXBQ_XMMi64_MASKmskw_XMMi8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_cvtepi8_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="SI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI8"/>
+ <description>Sign extend packed 8-bit integers in the low 2 bytes of "a" to packed 64-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := 64*j
+ l := 8*j
+ IF k[j]
+ dst[i+63:i] := SignExtend64(a[l+7:l])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMOVSXBQ" form="xmm {z}, xmm" xed="VPMOVSXBQ_XMMi64_MASKmskw_XMMi8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cvtepi32_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m256i" varname="dst" etype="SI64"/>
+ <parameter type="__m256i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI32"/>
+ <description>Sign extend packed 32-bit integers in "a" to packed 64-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := 64*j
+ l := 32*j
+ IF k[j]
+ dst[i+63:i] := SignExtend64(a[l+31:l])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMOVSXDQ" form="ymm {k}, xmm" xed="VPMOVSXDQ_YMMi64_MASKmskw_XMMi32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_cvtepi32_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m256i" varname="dst" etype="SI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI32"/>
+ <description>Sign extend packed 32-bit integers in "a" to packed 64-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := 64*j
+ l := 32*j
+ IF k[j]
+ dst[i+63:i] := SignExtend64(a[l+31:l])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMOVSXDQ" form="ymm {z}, xmm" xed="VPMOVSXDQ_YMMi64_MASKmskw_XMMi32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cvtepi32_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="SI64"/>
+ <parameter type="__m128i" varname="src" etype="SI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI32"/>
+ <description>Sign extend packed 32-bit integers in "a" to packed 64-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := 64*j
+ l := 32*j
+ IF k[j]
+ dst[i+63:i] := SignExtend64(a[l+31:l])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMOVSXDQ" form="xmm {k}, xmm" xed="VPMOVSXDQ_XMMi64_MASKmskw_XMMi32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_cvtepi32_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="SI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI32"/>
+ <description>Sign extend packed 32-bit integers in "a" to packed 64-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := 64*j
+ l := 32*j
+ IF k[j]
+ dst[i+63:i] := SignExtend64(a[l+31:l])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMOVSXDQ" form="xmm {z}, xmm" xed="VPMOVSXDQ_XMMi64_MASKmskw_XMMi32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cvtepi16_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m256i" varname="dst" etype="SI32"/>
+ <parameter type="__m256i" varname="src" etype="SI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI16"/>
+ <description>Sign extend packed 16-bit integers in "a" to packed 32-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ l := j*16
+ IF k[j]
+ dst[i+31:i] := SignExtend32(a[l+15:l])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMOVSXWD" form="ymm {k}, xmm" xed="VPMOVSXWD_YMMi32_MASKmskw_XMMi16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_cvtepi16_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m256i" varname="dst" etype="SI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI16"/>
+ <description>Sign extend packed 16-bit integers in "a" to packed 32-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := 32*j
+ l := 16*j
+ IF k[j]
+ dst[i+31:i] := SignExtend32(a[l+15:l])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMOVSXWD" form="ymm {z}, xmm" xed="VPMOVSXWD_YMMi32_MASKmskw_XMMi16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cvtepi16_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="SI32"/>
+ <parameter type="__m128i" varname="src" etype="SI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI16"/>
+ <description>Sign extend packed 16-bit integers in "a" to packed 32-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ l := j*16
+ IF k[j]
+ dst[i+31:i] := SignExtend32(a[l+15:l])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMOVSXWD" form="xmm {k}, xmm" xed="VPMOVSXWD_XMMi32_MASKmskw_XMMi16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_cvtepi16_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="SI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI16"/>
+ <description>Sign extend packed 16-bit integers in "a" to packed 32-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := 32*j
+ l := 16*j
+ IF k[j]
+ dst[i+31:i] := SignExtend32(a[l+15:l])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMOVSXWD" form="xmm {z}, xmm" xed="VPMOVSXWD_XMMi32_MASKmskw_XMMi16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cvtepi16_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m256i" varname="dst" etype="SI64"/>
+ <parameter type="__m256i" varname="src" etype="SI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI16"/>
+ <description>Sign extend packed 16-bit integers in the low 8 bytes of "a" to packed 64-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := 64*j
+ l := 16*j
+ IF k[j]
+ dst[i+63:i] := SignExtend64(a[l+15:l])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMOVSXWQ" form="ymm {k}, xmm" xed="VPMOVSXWQ_YMMi64_MASKmskw_XMMi16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_cvtepi16_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m256i" varname="dst" etype="SI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI16"/>
+ <description>Sign extend packed 16-bit integers in the low 8 bytes of "a" to packed 64-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := 64*j
+ l := 16*j
+ IF k[j]
+ dst[i+63:i] := SignExtend64(a[l+15:l])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMOVSXWQ" form="ymm {z}, xmm" xed="VPMOVSXWQ_YMMi64_MASKmskw_XMMi16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cvtepi16_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="SI64"/>
+ <parameter type="__m128i" varname="src" etype="SI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI16"/>
+ <description>Sign extend packed 16-bit integers in the low 4 bytes of "a" to packed 64-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := 64*j
+ l := 16*j
+ IF k[j]
+ dst[i+63:i] := SignExtend64(a[l+15:l])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMOVSXWQ" form="xmm {k}, xmm" xed="VPMOVSXWQ_XMMi64_MASKmskw_XMMi16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_cvtepi16_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="SI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI16"/>
+ <description>Sign extend packed 16-bit integers in the low 4 bytes of "a" to packed 64-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := 64*j
+ l := 16*j
+ IF k[j]
+ dst[i+63:i] := SignExtend64(a[l+15:l])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMOVSXWQ" form="xmm {z}, xmm" xed="VPMOVSXWQ_XMMi64_MASKmskw_XMMi16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_cvtusepi32_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <description>Convert packed unsigned 32-bit integers in "a" to packed unsigned 8-bit integers with unsigned saturation, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := 32*j
+ k := 8*j
+ dst[k+7:k] := SaturateU8(a[i+31:i])
+ENDFOR
+dst[MAX:64] := 0
+ </operation>
+ <instruction name="VPMOVUSDB" form="xmm, ymm" xed="VPMOVUSDB_XMMu8_MASKmskw_YMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cvtusepi32_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__m128i" varname="src" etype="UI8"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <description>Convert packed unsigned 32-bit integers in "a" to packed unsigned 8-bit integers with unsigned saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := 32*j
+ l := 8*j
+ IF k[j]
+ dst[l+7:l] := SaturateU8(a[i+31:i])
+ ELSE
+ dst[l+7:l] := src[l+7:l]
+ FI
+ENDFOR
+dst[MAX:64] := 0
+ </operation>
+ <instruction name="VPMOVUSDB" form="xmm {k}, ymm" xed="VPMOVUSDB_XMMu8_MASKmskw_YMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cvtusepi32_storeu_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="base_addr" etype="UI8" memwidth="64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <description>Convert packed unsigned 32-bit integers in "a" to packed unsigned 8-bit integers with unsigned saturation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr".</description>
+ <operation>
+FOR j := 0 to 7
+ i := 32*j
+ l := 8*j
+ IF k[j]
+ MEM[base_addr+l+7:base_addr+l] := SaturateU8(a[i+31:i])
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VPMOVUSDB" form="m64 {k}, ymm" xed="VPMOVUSDB_MEMu8_MASKmskw_YMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_cvtusepi32_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <description>Convert packed unsigned 32-bit integers in "a" to packed unsigned 8-bit integers with unsigned saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := 32*j
+ l := 8*j
+ IF k[j]
+ dst[l+7:l] := SaturateU8(a[i+31:i])
+ ELSE
+ dst[l+7:l] := 0
+ FI
+ENDFOR
+dst[MAX:64] := 0
+ </operation>
+ <instruction name="VPMOVUSDB" form="xmm {z}, ymm" xed="VPMOVUSDB_XMMu8_MASKmskw_YMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cvtusepi32_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <description>Convert packed unsigned 32-bit integers in "a" to packed unsigned 8-bit integers with unsigned saturation, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := 32*j
+ k := 8*j
+ dst[k+7:k] := SaturateU8(a[i+31:i])
+ENDFOR
+dst[MAX:32] := 0
+ </operation>
+ <instruction name="VPMOVUSDB" form="xmm, xmm" xed="VPMOVUSDB_XMMu8_MASKmskw_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cvtusepi32_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__m128i" varname="src" etype="UI8"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <description>Convert packed unsigned 32-bit integers in "a" to packed unsigned 8-bit integers with unsigned saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := 32*j
+ l := 8*j
+ IF k[j]
+ dst[l+7:l] := SaturateU8(a[i+31:i])
+ ELSE
+ dst[l+7:l] := src[l+7:l]
+ FI
+ENDFOR
+dst[MAX:32] := 0
+ </operation>
+ <instruction name="VPMOVUSDB" form="xmm {k}, xmm" xed="VPMOVUSDB_XMMu8_MASKmskw_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cvtusepi32_storeu_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="base_addr" etype="UI8" memwidth="32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <description>Convert packed unsigned 32-bit integers in "a" to packed unsigned 8-bit integers with unsigned saturation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr".</description>
+ <operation>
+FOR j := 0 to 3
+ i := 32*j
+ l := 8*j
+ IF k[j]
+ MEM[base_addr+l+7:base_addr+l] := SaturateU8(a[i+31:i])
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VPMOVUSDB" form="m32 {k}, xmm" xed="VPMOVUSDB_MEMu8_MASKmskw_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_cvtusepi32_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <description>Convert packed unsigned 32-bit integers in "a" to packed unsigned 8-bit integers with unsigned saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := 32*j
+ l := 8*j
+ IF k[j]
+ dst[l+7:l] := SaturateU8(a[i+31:i])
+ ELSE
+ dst[l+7:l] := 0
+ FI
+ENDFOR
+dst[MAX:32] := 0
+ </operation>
+ <instruction name="VPMOVUSDB" form="xmm {z}, xmm" xed="VPMOVUSDB_XMMu8_MASKmskw_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_cvtusepi32_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <description>Convert packed unsigned 32-bit integers in "a" to packed unsigned 16-bit integers with unsigned saturation, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := 32*j
+ k := 16*j
+ dst[k+15:k] := SaturateU16(a[i+31:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMOVUSDW" form="xmm, ymm" xed="VPMOVUSDW_XMMu16_MASKmskw_YMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cvtusepi32_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__m128i" varname="src" etype="UI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <description>Convert packed unsigned 32-bit integers in "a" to packed unsigned 16-bit integers with unsigned saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := 32*j
+ l := 16*j
+ IF k[j]
+ dst[l+15:l] := SaturateU16(a[i+31:i])
+ ELSE
+ dst[l+15:l] := src[l+15:l]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMOVUSDW" form="xmm {k}, ymm" xed="VPMOVUSDW_XMMu16_MASKmskw_YMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cvtusepi32_storeu_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="base_addr" etype="UI16" memwidth="128"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <description>Convert packed unsigned 32-bit integers in "a" to packed unsigned 16-bit integers with unsigned saturation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr".</description>
+ <operation>
+FOR j := 0 to 7
+ i := 32*j
+ l := 16*j
+ IF k[j]
+ MEM[base_addr+l+15:base_addr+l] := SaturateU16(a[i+31:i])
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VPMOVUSDW" form="m128 {k}, ymm" xed="VPMOVUSDW_MEMu16_MASKmskw_YMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_cvtusepi32_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <description>Convert packed unsigned 32-bit integers in "a" to packed unsigned 16-bit integers with unsigned saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := 32*j
+ l := 16*j
+ IF k[j]
+ dst[l+15:l] := SaturateU16(a[i+31:i])
+ ELSE
+ dst[l+15:l] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMOVUSDW" form="xmm {z}, ymm" xed="VPMOVUSDW_XMMu16_MASKmskw_YMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cvtusepi32_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <description>Convert packed unsigned 32-bit integers in "a" to packed unsigned 16-bit integers with unsigned saturation, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := 32*j
+ k := 16*j
+ dst[k+15:k] := SaturateU16(a[i+31:i])
+ENDFOR
+dst[MAX:64] := 0
+ </operation>
+ <instruction name="VPMOVUSDW" form="xmm, xmm" xed="VPMOVUSDW_XMMu16_MASKmskw_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cvtusepi32_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__m128i" varname="src" etype="UI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <description>Convert packed unsigned 32-bit integers in "a" to packed unsigned 16-bit integers with unsigned saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := 32*j
+ l := 16*j
+ IF k[j]
+ dst[l+15:l] := SaturateU16(a[i+31:i])
+ ELSE
+ dst[l+15:l] := src[l+15:l]
+ FI
+ENDFOR
+dst[MAX:64] := 0
+ </operation>
+ <instruction name="VPMOVUSDW" form="xmm {k}, xmm" xed="VPMOVUSDW_XMMu16_MASKmskw_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cvtusepi32_storeu_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="base_addr" etype="UI16" memwidth="64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <description>Convert packed unsigned 32-bit integers in "a" to packed unsigned 16-bit integers with unsigned saturation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr".</description>
+ <operation>
+FOR j := 0 to 3
+ i := 32*j
+ l := 16*j
+ IF k[j]
+ MEM[base_addr+l+15:base_addr+l] := SaturateU16(a[i+31:i])
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VPMOVUSDW" form="m64 {k}, xmm" xed="VPMOVUSDW_MEMu16_MASKmskw_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_cvtusepi32_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <description>Convert packed unsigned 32-bit integers in "a" to packed unsigned 16-bit integers with unsigned saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := 32*j
+ l := 16*j
+ IF k[j]
+ dst[l+15:l] := SaturateU16(a[i+31:i])
+ ELSE
+ dst[l+15:l] := 0
+ FI
+ENDFOR
+dst[MAX:64] := 0
+ </operation>
+ <instruction name="VPMOVUSDW" form="xmm {z}, xmm" xed="VPMOVUSDW_XMMu16_MASKmskw_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_cvtusepi64_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <description>Convert packed unsigned 64-bit integers in "a" to packed unsigned 8-bit integers with unsigned saturation, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := 64*j
+ k := 8*j
+ dst[k+7:k] := SaturateU8(a[i+63:i])
+ENDFOR
+dst[MAX:32] := 0
+ </operation>
+ <instruction name="VPMOVUSQB" form="xmm, ymm" xed="VPMOVUSQB_XMMu8_MASKmskw_YMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cvtusepi64_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__m128i" varname="src" etype="UI8"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <description>Convert packed unsigned 64-bit integers in "a" to packed unsigned 8-bit integers with unsigned saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := 64*j
+ l := 8*j
+ IF k[j]
+ dst[l+7:l] := SaturateU8(a[i+63:i])
+ ELSE
+ dst[l+7:l] := src[l+7:l]
+ FI
+ENDFOR
+dst[MAX:32] := 0
+ </operation>
+ <instruction name="VPMOVUSQB" form="xmm {k}, ymm" xed="VPMOVUSQB_XMMu8_MASKmskw_YMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cvtusepi64_storeu_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="base_addr" etype="UI8" memwidth="32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <description>Convert packed unsigned 64-bit integers in "a" to packed unsigned 8-bit integers with unsigned saturation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr".</description>
+ <operation>
+FOR j := 0 to 3
+ i := 64*j
+ l := 8*j
+ IF k[j]
+ MEM[base_addr+l+7:base_addr+l] := SaturateU8(a[i+63:i])
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VPMOVUSQB" form="m32 {k}, ymm" xed="VPMOVUSQB_MEMu8_MASKmskw_YMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_cvtusepi64_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <description>Convert packed unsigned 64-bit integers in "a" to packed unsigned 8-bit integers with unsigned saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := 64*j
+ l := 8*j
+ IF k[j]
+ dst[l+7:l] := SaturateU8(a[i+63:i])
+ ELSE
+ dst[l+7:l] := 0
+ FI
+ENDFOR
+dst[MAX:32] := 0
+ </operation>
+ <instruction name="VPMOVUSQB" form="xmm {z}, ymm" xed="VPMOVUSQB_XMMu8_MASKmskw_YMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cvtusepi64_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <description>Convert packed unsigned 64-bit integers in "a" to packed unsigned 8-bit integers with unsigned saturation, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := 64*j
+ k := 8*j
+ dst[k+7:k] := SaturateU8(a[i+63:i])
+ENDFOR
+dst[MAX:16] := 0
+ </operation>
+ <instruction name="VPMOVUSQB" form="xmm, xmm" xed="VPMOVUSQB_XMMu8_MASKmskw_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cvtusepi64_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__m128i" varname="src" etype="UI8"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <description>Convert packed unsigned 64-bit integers in "a" to packed unsigned 8-bit integers with unsigned saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := 64*j
+ l := 8*j
+ IF k[j]
+ dst[l+7:l] := SaturateU8(a[i+63:i])
+ ELSE
+ dst[l+7:l] := src[l+7:l]
+ FI
+ENDFOR
+dst[MAX:16] := 0
+ </operation>
+ <instruction name="VPMOVUSQB" form="xmm {k}, xmm" xed="VPMOVUSQB_XMMu8_MASKmskw_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cvtusepi64_storeu_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="base_addr" etype="UI8" memwidth="16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <description>Convert packed unsigned 64-bit integers in "a" to packed unsigned 8-bit integers with unsigned saturation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr".</description>
+ <operation>
+FOR j := 0 to 1
+ i := 64*j
+ l := 8*j
+ IF k[j]
+ MEM[base_addr+l+7:base_addr+l] := SaturateU8(a[i+63:i])
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VPMOVUSQB" form="m16 {k}, xmm" xed="VPMOVUSQB_MEMu8_MASKmskw_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_cvtusepi64_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <description>Convert packed unsigned 64-bit integers in "a" to packed unsigned 8-bit integers with unsigned saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := 64*j
+ l := 8*j
+ IF k[j]
+ dst[l+7:l] := SaturateU8(a[i+63:i])
+ ELSE
+ dst[l+7:l] := 0
+ FI
+ENDFOR
+dst[MAX:16] := 0
+ </operation>
+ <instruction name="VPMOVUSQB" form="xmm {z}, xmm" xed="VPMOVUSQB_XMMu8_MASKmskw_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_cvtusepi64_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <description>Convert packed unsigned 64-bit integers in "a" to packed unsigned 32-bit integers with unsigned saturation, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := 64*j
+ k := 32*j
+ dst[k+31:k] := SaturateU32(a[i+63:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMOVUSQD" form="xmm, ymm" xed="VPMOVUSQD_XMMu32_MASKmskw_YMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cvtusepi64_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <description>Convert packed unsigned 64-bit integers in "a" to packed unsigned 32-bit integers with unsigned saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := 64*j
+ l := 32*j
+ IF k[j]
+ dst[l+31:l] := SaturateU32(a[i+63:i])
+ ELSE
+ dst[l+31:l] := src[l+31:l]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMOVUSQD" form="xmm {k}, ymm" xed="VPMOVUSQD_XMMu32_MASKmskw_YMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cvtusepi64_storeu_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="base_addr" etype="UI32" memwidth="128"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <description>Convert packed unsigned 64-bit integers in "a" to packed unsigned 32-bit integers with unsigned saturation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr".</description>
+ <operation>
+FOR j := 0 to 3
+ i := 64*j
+ l := 32*j
+ IF k[j]
+ MEM[base_addr+l+31:base_addr+l] := SaturateU32(a[i+63:i])
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VPMOVUSQD" form="m128 {k}, ymm" xed="VPMOVUSQD_MEMu32_MASKmskw_YMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_cvtusepi64_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <description>Convert packed unsigned 64-bit integers in "a" to packed unsigned 32-bit integers with unsigned saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := 64*j
+ l := 32*j
+ IF k[j]
+ dst[l+31:l] := SaturateU32(a[i+63:i])
+ ELSE
+ dst[l+31:l] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMOVUSQD" form="xmm {z}, ymm" xed="VPMOVUSQD_XMMu32_MASKmskw_YMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cvtusepi64_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <description>Convert packed unsigned 64-bit integers in "a" to packed unsigned 32-bit integers with unsigned saturation, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := 64*j
+ k := 32*j
+ dst[k+31:k] := SaturateU32(a[i+63:i])
+ENDFOR
+dst[MAX:64] := 0
+ </operation>
+ <instruction name="VPMOVUSQD" form="xmm, xmm" xed="VPMOVUSQD_XMMu32_MASKmskw_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cvtusepi64_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <description>Convert packed unsigned 64-bit integers in "a" to packed unsigned 32-bit integers with unsigned saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := 64*j
+ l := 32*j
+ IF k[j]
+ dst[l+31:l] := SaturateU32(a[i+63:i])
+ ELSE
+ dst[l+31:l] := src[l+31:l]
+ FI
+ENDFOR
+dst[MAX:64] := 0
+ </operation>
+ <instruction name="VPMOVUSQD" form="xmm {k}, xmm" xed="VPMOVUSQD_XMMu32_MASKmskw_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cvtusepi64_storeu_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="base_addr" etype="UI32" memwidth="64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <description>Convert packed unsigned 64-bit integers in "a" to packed unsigned 32-bit integers with unsigned saturation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr".</description>
+ <operation>
+FOR j := 0 to 1
+ i := 64*j
+ l := 32*j
+ IF k[j]
+ MEM[base_addr+l+31:base_addr+l] := SaturateU32(a[i+63:i])
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VPMOVUSQD" form="m64 {k}, xmm" xed="VPMOVUSQD_MEMu32_MASKmskw_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_cvtusepi64_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <description>Convert packed unsigned 64-bit integers in "a" to packed unsigned 32-bit integers with unsigned saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := 64*j
+ l := 32*j
+ IF k[j]
+ dst[l+31:l] := SaturateU32(a[i+63:i])
+ ELSE
+ dst[l+31:l] := 0
+ FI
+ENDFOR
+dst[MAX:64] := 0
+ </operation>
+ <instruction name="VPMOVUSQD" form="xmm {z}, xmm" xed="VPMOVUSQD_XMMu32_MASKmskw_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_cvtusepi64_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <description>Convert packed unsigned 64-bit integers in "a" to packed unsigned 16-bit integers with unsigned saturation, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := 64*j
+ k := 16*j
+ dst[k+15:k] := SaturateU16(a[i+63:i])
+ENDFOR
+dst[MAX:64] := 0
+ </operation>
+ <instruction name="VPMOVUSQW" form="xmm, ymm" xed="VPMOVUSQW_XMMu16_MASKmskw_YMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cvtusepi64_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__m128i" varname="src" etype="UI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <description>Convert packed unsigned 64-bit integers in "a" to packed unsigned 16-bit integers with unsigned saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := 64*j
+ l := 16*j
+ IF k[j]
+ dst[l+15:l] := SaturateU16(a[i+63:i])
+ ELSE
+ dst[l+15:l] := src[l+15:l]
+ FI
+ENDFOR
+dst[MAX:64] := 0
+ </operation>
+ <instruction name="VPMOVUSQW" form="xmm {k}, ymm" xed="VPMOVUSQW_XMMu16_MASKmskw_YMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cvtusepi64_storeu_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="base_addr" etype="UI16" memwidth="64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <description>Convert packed unsigned 64-bit integers in "a" to packed unsigned 16-bit integers with unsigned saturation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr".</description>
+ <operation>
+FOR j := 0 to 3
+ i := 64*j
+ l := 16*j
+ IF k[j]
+ MEM[base_addr+l+15:base_addr+l] := SaturateU16(a[i+63:i])
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VPMOVUSQW" form="m64 {k}, ymm" xed="VPMOVUSQW_MEMu16_MASKmskw_YMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_cvtusepi64_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <description>Convert packed unsigned 64-bit integers in "a" to packed unsigned 16-bit integers with unsigned saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := 64*j
+ l := 16*j
+ IF k[j]
+ dst[l+15:l] := SaturateU16(a[i+63:i])
+ ELSE
+ dst[l+15:l] := 0
+ FI
+ENDFOR
+dst[MAX:64] := 0
+ </operation>
+ <instruction name="VPMOVUSQW" form="xmm {z}, ymm" xed="VPMOVUSQW_XMMu16_MASKmskw_YMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cvtusepi64_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <description>Convert packed unsigned 64-bit integers in "a" to packed unsigned 16-bit integers with unsigned saturation, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := 64*j
+ k := 16*j
+ dst[k+15:k] := SaturateU16(a[i+63:i])
+ENDFOR
+dst[MAX:32] := 0
+ </operation>
+ <instruction name="VPMOVUSQW" form="xmm, xmm" xed="VPMOVUSQW_XMMu16_MASKmskw_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cvtusepi64_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__m128i" varname="src" etype="UI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <description>Convert packed unsigned 64-bit integers in "a" to packed unsigned 16-bit integers with unsigned saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := 64*j
+ l := 16*j
+ IF k[j]
+ dst[l+15:l] := SaturateU16(a[i+63:i])
+ ELSE
+ dst[l+15:l] := src[l+15:l]
+ FI
+ENDFOR
+dst[MAX:32] := 0
+ </operation>
+ <instruction name="VPMOVUSQW" form="xmm {k}, xmm" xed="VPMOVUSQW_XMMu16_MASKmskw_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cvtusepi64_storeu_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="base_addr" etype="UI16" memwidth="32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <description>Convert packed unsigned 64-bit integers in "a" to packed unsigned 16-bit integers with unsigned saturation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr".</description>
+ <operation>
+FOR j := 0 to 1
+ i := 64*j
+ l := 16*j
+ IF k[j]
+ MEM[base_addr+l+15:base_addr+l] := SaturateU16(a[i+63:i])
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VPMOVUSQW" form="m32 {k}, xmm" xed="VPMOVUSQW_MEMu16_MASKmskw_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_cvtusepi64_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <description>Convert packed unsigned 64-bit integers in "a" to packed unsigned 16-bit integers with unsigned saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := 64*j
+ l := 16*j
+ IF k[j]
+ dst[l+15:l] := SaturateU16(a[i+63:i])
+ ELSE
+ dst[l+15:l] := 0
+ FI
+ENDFOR
+dst[MAX:32] := 0
+ </operation>
+ <instruction name="VPMOVUSQW" form="xmm {z}, xmm" xed="VPMOVUSQW_XMMu16_MASKmskw_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cvtepu8_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
+ <description>Zero extend packed unsigned 8-bit integers in the low 8 bytes of "a" to packed 32-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := 32*j
+ l := 8*j
+ IF k[j]
+ dst[i+31:i] := ZeroExtend32(a[l+7:l])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMOVZXBD" form="ymm {k}, xmm" xed="VPMOVZXBD_YMMi32_MASKmskw_XMMi8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_cvtepu8_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
+ <description>Zero extend packed unsigned 8-bit integers in the low 8 bytes of "a" to packed 32-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := 32*j
+ l := 8*j
+ IF k[j]
+ dst[i+31:i] := ZeroExtend32(a[l+7:l])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMOVZXBD" form="ymm {z}, xmm" xed="VPMOVZXBD_YMMi32_MASKmskw_XMMi8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cvtepu8_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
+ <description>Zero extend packed unsigned 8-bit integers in the low 4 bytes of "a" to packed 32-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := 32*j
+ l := 8*j
+ IF k[j]
+ dst[i+31:i] := ZeroExtend32(a[l+7:l])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMOVZXBD" form="xmm {k}, xmm" xed="VPMOVZXBD_XMMi32_MASKmskw_XMMi8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_cvtepu8_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
+ <description>Zero extend packed unsigned 8-bit integers in the low 4 bytes of "a" to packed 32-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := 32*j
+ l := 8*j
+ IF k[j]
+ dst[i+31:i] := ZeroExtend32(a[l+7:l])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMOVZXBD" form="xmm {z}, xmm" xed="VPMOVZXBD_XMMi32_MASKmskw_XMMi8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cvtepu8_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m256i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
+ <description>Zero extend packed unsigned 8-bit integers in the low 4 bytes of "a" to packed 64-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := 64*j
+ l := 8*j
+ IF k[j]
+ dst[i+63:i] := ZeroExtend64(a[l+7:l])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMOVZXBQ" form="ymm {k}, xmm" xed="VPMOVZXBQ_YMMi64_MASKmskw_XMMi8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_cvtepu8_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
+ <description>Zero extend packed unsigned 8-bit integers in the low 4 bytes of "a" to packed 64-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := 64*j
+ l := 8*j
+ IF k[j]
+ dst[i+63:i] := ZeroExtend64(a[l+7:l])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMOVZXBQ" form="ymm {z}, xmm" xed="VPMOVZXBQ_YMMi64_MASKmskw_XMMi8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cvtepu8_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m128i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
+ <description>Zero extend packed unsigned 8-bit integers in the low 2 bytes of "a" to packed 64-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := 64*j
+ l := 8*j
+ IF k[j]
+ dst[i+63:i] := ZeroExtend64(a[l+7:l])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMOVZXBQ" form="xmm {k}, xmm" xed="VPMOVZXBQ_XMMi64_MASKmskw_XMMi8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_cvtepu8_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
+ <description>Zero extend packed unsigned 8-bit integers in the low 2 bytes of "a" to packed 64-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := 64*j
+ l := 8*j
+ IF k[j]
+ dst[i+63:i] := ZeroExtend64(a[l+7:l])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMOVZXBQ" form="xmm {z}, xmm" xed="VPMOVZXBQ_XMMi64_MASKmskw_XMMi8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cvtepu32_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m256i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <description>Zero extend packed unsigned 32-bit integers in "a" to packed 64-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := 64*j
+ l := 32*j
+ IF k[j]
+ dst[i+63:i] := ZeroExtend64(a[l+31:l])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMOVZXDQ" form="ymm {k}, xmm" xed="VPMOVZXDQ_YMMi64_MASKmskw_XMMi32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_cvtepu32_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <description>Zero extend packed unsigned 32-bit integers in "a" to packed 64-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := 64*j
+ l := 32*j
+ IF k[j]
+ dst[i+63:i] := ZeroExtend64(a[l+31:l])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMOVZXDQ" form="ymm {z}, xmm" xed="VPMOVZXDQ_YMMi64_MASKmskw_XMMi32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cvtepu32_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m128i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <description>Zero extend packed unsigned 32-bit integers in "a" to packed 64-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := 64*j
+ l := 32*j
+ IF k[j]
+ dst[i+63:i] := ZeroExtend64(a[l+31:l])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMOVZXDQ" form="xmm {k}, xmm" xed="VPMOVZXDQ_XMMi64_MASKmskw_XMMi32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_cvtepu32_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <description>Zero extend packed unsigned 32-bit integers in "a" to packed 64-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := 64*j
+ l := 32*j
+ IF k[j]
+ dst[i+63:i] := ZeroExtend64(a[l+31:l])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMOVZXDQ" form="xmm {z}, xmm" xed="VPMOVZXDQ_XMMi64_MASKmskw_XMMi32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cvtepu16_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <description>Zero extend packed unsigned 16-bit integers in "a" to packed 32-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := 32*j
+ l := 16*j
+ IF k[j]
+ dst[i+31:i] := ZeroExtend32(a[l+15:l])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMOVZXWD" form="ymm {k}, xmm" xed="VPMOVZXWD_YMMi32_MASKmskw_XMMi16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_cvtepu16_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <description>Zero extend packed unsigned 16-bit integers in "a" to packed 32-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := 32*j
+ l := 16*j
+ IF k[j]
+ dst[i+31:i] := ZeroExtend32(a[l+15:l])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMOVZXWD" form="ymm {z}, xmm" xed="VPMOVZXWD_YMMi32_MASKmskw_XMMi16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cvtepu16_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <description>Zero extend packed unsigned 16-bit integers in "a" to packed 32-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := 32*j
+ l := 16*j
+ IF k[j]
+ dst[i+31:i] := ZeroExtend32(a[l+15:l])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMOVZXWD" form="xmm {k}, xmm" xed="VPMOVZXWD_XMMi32_MASKmskw_XMMi16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_cvtepu16_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <description>Zero extend packed unsigned 16-bit integers in "a" to packed 32-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := 32*j
+ l := 16*j
+ IF k[j]
+ dst[i+31:i] := ZeroExtend32(a[l+15:l])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMOVZXWD" form="xmm {z}, xmm" xed="VPMOVZXWD_XMMi32_MASKmskw_XMMi16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cvtepu16_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m256i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <description>Zero extend packed unsigned 16-bit integers in the low 8 bytes of "a" to packed 64-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := 64*j
+ l := 16*j
+ IF k[j]
+ dst[i+63:i] := ZeroExtend64(a[l+15:l])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMOVZXWQ" form="ymm {k}, xmm" xed="VPMOVZXWQ_YMMi64_MASKmskw_XMMi16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_cvtepu16_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <description>Zero extend packed unsigned 16-bit integers in the low 8 bytes of "a" to packed 64-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := 64*j
+ l := 16*j
+ IF k[j]
+ dst[i+63:i] := ZeroExtend64(a[l+15:l])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMOVZXWQ" form="ymm {z}, xmm" xed="VPMOVZXWQ_YMMi64_MASKmskw_XMMi16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cvtepu16_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m128i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <description>Zero extend packed unsigned 16-bit integers in the low 4 bytes of "a" to packed 64-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := 64*j
+ l := 16*j
+ IF k[j]
+ dst[i+63:i] := ZeroExtend64(a[l+15:l])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMOVZXWQ" form="xmm {k}, xmm" xed="VPMOVZXWQ_XMMi64_MASKmskw_XMMi16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_cvtepu16_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <description>Zero extend packed unsigned 16-bit integers in the low 4 bytes of "a" to packed 64-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := 64*j
+ l := 16*j
+ IF k[j]
+ dst[i+63:i] := ZeroExtend64(a[l+15:l])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMOVZXWQ" form="xmm {z}, xmm" xed="VPMOVZXWQ_XMMi64_MASKmskw_XMMi16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_mul_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="SI64"/>
+ <parameter type="__m256i" varname="src" etype="SI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI32"/>
+ <parameter type="__m256i" varname="b" etype="SI32"/>
+ <description>Multiply the low signed 32-bit integers from each packed 64-bit element in "a" and "b", and store the signed 64-bit results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := SignExtend64(a[i+31:i]) * SignExtend64(b[i+31:i])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMULDQ" form="ymm {k}, ymm, ymm" xed="VPMULDQ_YMMi64_MASKmskw_YMMi32_YMMi32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_mul_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="SI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI32"/>
+ <parameter type="__m256i" varname="b" etype="SI32"/>
+ <description>Multiply the low signed 32-bit integers from each packed 64-bit element in "a" and "b", and store the signed 64-bit results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := SignExtend64(a[i+31:i]) * SignExtend64(b[i+31:i])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMULDQ" form="ymm {z}, ymm, ymm" xed="VPMULDQ_YMMi64_MASKmskw_YMMi32_YMMi32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_mul_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="SI64"/>
+ <parameter type="__m128i" varname="src" etype="SI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI32"/>
+ <parameter type="__m128i" varname="b" etype="SI32"/>
+ <description>Multiply the low signed 32-bit integers from each packed 64-bit element in "a" and "b", and store the signed 64-bit results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := SignExtend64(a[i+31:i]) * SignExtend64(b[i+31:i])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMULDQ" form="xmm {k}, xmm, xmm" xed="VPMULDQ_XMMi64_MASKmskw_XMMi32_XMMi32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_mul_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="SI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI32"/>
+ <parameter type="__m128i" varname="b" etype="SI32"/>
+ <description>Multiply the low signed 32-bit integers from each packed 64-bit element in "a" and "b", and store the signed 64-bit results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := SignExtend64(a[i+31:i]) * SignExtend64(b[i+31:i])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMULDQ" form="xmm {z}, xmm, xmm" xed="VPMULDQ_XMMi64_MASKmskw_XMMi32_XMMi32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_mullo_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="__m256i" varname="b" etype="UI32"/>
+ <description>Multiply the packed 32-bit integers in "a" and "b", producing intermediate 64-bit integers, and store the low 32 bits of the intermediate integers in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ tmp[63:0] := a[i+31:i] * b[i+31:i]
+ dst[i+31:i] := tmp[31:0]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMULLD" form="ymm {k}, ymm, ymm" xed="VPMULLD_YMMu32_MASKmskw_YMMu32_YMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_mullo_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="__m256i" varname="b" etype="UI32"/>
+ <description>Multiply the packed 32-bit integers in "a" and "b", producing intermediate 64-bit integers, and store the low 32 bits of the intermediate integers in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ tmp[63:0] := a[i+31:i] * b[i+31:i]
+ dst[i+31:i] := tmp[31:0]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMULLD" form="ymm {z}, ymm, ymm" xed="VPMULLD_YMMu32_MASKmskw_YMMu32_YMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_mullo_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="__m512i" varname="b" etype="UI32"/>
+ <description>Multiply the packed 32-bit integers in "a" and "b", producing intermediate 64-bit integers, and store the low 32 bits of the intermediate integers in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ tmp[63:0] := a[i+31:i] * b[i+31:i]
+ dst[i+31:i] := tmp[31:0]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMULLD" form="zmm {z}, zmm, zmm" xed="VPMULLD_ZMMu32_MASKmskw_ZMMu32_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_mullo_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="b" etype="UI32"/>
+ <description>Multiply the packed 32-bit integers in "a" and "b", producing intermediate 64-bit integers, and store the low 32 bits of the intermediate integers in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ tmp[63:0] := a[i+31:i] * b[i+31:i]
+ dst[i+31:i] := tmp[31:0]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMULLD" form="xmm {k}, xmm, xmm" xed="VPMULLD_XMMu32_MASKmskw_XMMu32_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_mullo_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="b" etype="UI32"/>
+ <description>Multiply the packed 32-bit integers in "a" and "b", producing intermediate 64-bit integers, and store the low 32 bits of the intermediate integers in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ tmp[63:0] := a[i+31:i] * b[i+31:i]
+ dst[i+31:i] := tmp[31:0]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMULLD" form="xmm {z}, xmm, xmm" xed="VPMULLD_XMMu32_MASKmskw_XMMu32_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_mul_epu32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m256i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="__m256i" varname="b" etype="UI32"/>
+ <description>Multiply the low unsigned 32-bit integers from each packed 64-bit element in "a" and "b", and store the unsigned 64-bit results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := a[i+31:i] * b[i+31:i]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMULUDQ" form="ymm {k}, ymm, ymm" xed="VPMULUDQ_YMMu64_MASKmskw_YMMu32_YMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_mul_epu32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="__m256i" varname="b" etype="UI32"/>
+ <description>Multiply the low unsigned 32-bit integers from each packed 64-bit element in "a" and "b", and store the unsigned 64-bit results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := a[i+31:i] * b[i+31:i]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMULUDQ" form="ymm {z}, ymm, ymm" xed="VPMULUDQ_YMMu64_MASKmskw_YMMu32_YMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_mul_epu32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m128i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="b" etype="UI32"/>
+ <description>Multiply the low unsigned 32-bit integers from each packed 64-bit element in "a" and "b", and store the unsigned 64-bit results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := a[i+31:i] * b[i+31:i]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMULUDQ" form="xmm {k}, xmm, xmm" xed="VPMULUDQ_XMMu64_MASKmskw_XMMu32_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_mul_epu32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="b" etype="UI32"/>
+ <description>Multiply the low unsigned 32-bit integers from each packed 64-bit element in "a" and "b", and store the unsigned 64-bit results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := a[i+31:i] * b[i+31:i]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMULUDQ" form="xmm {z}, xmm, xmm" xed="VPMULUDQ_XMMu64_MASKmskw_XMMu32_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_or_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Logical</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="__m256i" varname="b" etype="UI32"/>
+ <description>Compute the bitwise OR of packed 32-bit integers in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := a[i+31:i] OR b[i+31:i]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPORD" form="ymm {k}, ymm, ymm" xed="VPORD_YMMu32_MASKmskw_YMMu32_YMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_or_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Logical</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="__m256i" varname="b" etype="UI32"/>
+ <description>Compute the bitwise OR of packed 32-bit integers in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := a[i+31:i] OR b[i+31:i]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPORD" form="ymm {z}, ymm, ymm" xed="VPORD_YMMu32_MASKmskw_YMMu32_YMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_or_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Logical</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="b" etype="UI32"/>
+ <description>Compute the bitwise OR of packed 32-bit integers in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := a[i+31:i] OR b[i+31:i]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPORD" form="xmm {k}, xmm, xmm" xed="VPORD_XMMu32_MASKmskw_XMMu32_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_or_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Logical</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="b" etype="UI32"/>
+ <description>Compute the bitwise OR of packed 32-bit integers in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := a[i+31:i] OR b[i+31:i]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPORD" form="xmm {z}, xmm, xmm" xed="VPORD_XMMu32_MASKmskw_XMMu32_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_or_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Logical</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m256i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="__m256i" varname="b" etype="UI64"/>
+ <description>Compute the bitwise OR of packed 64-bit integers in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := a[i+63:i] OR b[i+63:i]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPORQ" form="ymm {k}, ymm, ymm" xed="VPORQ_YMMu64_MASKmskw_YMMu64_YMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_or_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Logical</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="__m256i" varname="b" etype="UI64"/>
+ <description>Compute the bitwise OR of packed 64-bit integers in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := a[i+63:i] OR b[i+63:i]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPORQ" form="ymm {z}, ymm, ymm" xed="VPORQ_YMMu64_MASKmskw_YMMu64_YMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_or_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Logical</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m128i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <parameter type="__m128i" varname="b" etype="UI64"/>
+ <description>Compute the bitwise OR of packed 64-bit integers in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := a[i+63:i] OR b[i+63:i]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPORQ" form="xmm {k}, xmm, xmm" xed="VPORQ_XMMu64_MASKmskw_XMMu64_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_or_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Logical</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <parameter type="__m128i" varname="b" etype="UI64"/>
+ <description>Compute the bitwise OR of packed 64-bit integers in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := a[i+63:i] OR b[i+63:i]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPORQ" form="xmm {z}, xmm, xmm" xed="VPORQ_XMMu64_MASKmskw_XMMu64_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_rol_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Rotate the bits in each packed 32-bit integer in "a" to the left by the number of bits specified in "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE LEFT_ROTATE_DWORDS(src, count_src) {
+ count := count_src % 32
+ RETURN (src &lt;&lt; count) OR (src &gt;&gt; (32 - count))
+}
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := LEFT_ROTATE_DWORDS(a[i+31:i], imm8[7:0])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPROLD" form="ymm {k}, ymm, imm8" xed="VPROLD_YMMu32_MASKmskw_YMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_rol_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Rotate the bits in each packed 32-bit integer in "a" to the left by the number of bits specified in "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE LEFT_ROTATE_DWORDS(src, count_src) {
+ count := count_src % 32
+ RETURN (src &lt;&lt; count) OR (src &gt;&gt; (32 - count))
+}
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := LEFT_ROTATE_DWORDS(a[i+31:i], imm8[7:0])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPROLD" form="ymm {z}, ymm, imm8" xed="VPROLD_YMMu32_MASKmskw_YMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_rol_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Rotate the bits in each packed 32-bit integer in "a" to the left by the number of bits specified in "imm8", and store the results in "dst".</description>
+ <operation>
+DEFINE LEFT_ROTATE_DWORDS(src, count_src) {
+ count := count_src % 32
+ RETURN (src &lt;&lt; count) OR (src &gt;&gt; (32 - count))
+}
+FOR j := 0 to 7
+ i := j*32
+ dst[i+31:i] := LEFT_ROTATE_DWORDS(a[i+31:i], imm8[7:0])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPROLD" form="ymm, ymm, imm8" xed="VPROLD_YMMu32_MASKmskw_YMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_rol_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Rotate the bits in each packed 32-bit integer in "a" to the left by the number of bits specified in "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE LEFT_ROTATE_DWORDS(src, count_src) {
+ count := count_src % 32
+ RETURN (src &lt;&lt; count) OR (src &gt;&gt; (32 - count))
+}
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := LEFT_ROTATE_DWORDS(a[i+31:i], imm8[7:0])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPROLD" form="xmm {k}, xmm, imm8" xed="VPROLD_XMMu32_MASKmskw_XMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_rol_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Rotate the bits in each packed 32-bit integer in "a" to the left by the number of bits specified in "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE LEFT_ROTATE_DWORDS(src, count_src) {
+ count := count_src % 32
+ RETURN (src &lt;&lt; count) OR (src &gt;&gt; (32 - count))
+}
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := LEFT_ROTATE_DWORDS(a[i+31:i], imm8[7:0])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPROLD" form="xmm {z}, xmm, imm8" xed="VPROLD_XMMu32_MASKmskw_XMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_rol_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Rotate the bits in each packed 32-bit integer in "a" to the left by the number of bits specified in "imm8", and store the results in "dst".</description>
+ <operation>
+DEFINE LEFT_ROTATE_DWORDS(src, count_src) {
+ count := count_src % 32
+ RETURN (src &lt;&lt; count) OR (src &gt;&gt; (32 - count))
+}
+FOR j := 0 to 3
+ i := j*32
+ dst[i+31:i] := LEFT_ROTATE_DWORDS(a[i+31:i], imm8[7:0])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPROLD" form="xmm, xmm, imm8" xed="VPROLD_XMMu32_MASKmskw_XMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_rol_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m256i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Rotate the bits in each packed 64-bit integer in "a" to the left by the number of bits specified in "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE LEFT_ROTATE_QWORDS(src, count_src) {
+ count := count_src % 64
+ RETURN (src &lt;&lt; count) OR (src &gt;&gt; (64 - count))
+}
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := LEFT_ROTATE_QWORDS(a[i+63:i], imm8[7:0])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPROLQ" form="ymm {k}, ymm, imm8" xed="VPROLQ_YMMu64_MASKmskw_YMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_rol_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Rotate the bits in each packed 64-bit integer in "a" to the left by the number of bits specified in "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE LEFT_ROTATE_QWORDS(src, count_src) {
+ count := count_src % 64
+ RETURN (src &lt;&lt; count) OR (src &gt;&gt; (64 - count))
+}
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := LEFT_ROTATE_QWORDS(a[i+63:i], imm8[7:0])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPROLQ" form="ymm {z}, ymm, imm8" xed="VPROLQ_YMMu64_MASKmskw_YMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_rol_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Rotate the bits in each packed 64-bit integer in "a" to the left by the number of bits specified in "imm8", and store the results in "dst".</description>
+ <operation>
+DEFINE LEFT_ROTATE_QWORDS(src, count_src) {
+ count := count_src % 64
+ RETURN (src &lt;&lt; count) OR (src &gt;&gt; (64 - count))
+}
+FOR j := 0 to 3
+ i := j*64
+ dst[i+63:i] := LEFT_ROTATE_QWORDS(a[i+63:i], imm8[7:0])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPROLQ" form="ymm, ymm, imm8" xed="VPROLQ_YMMu64_MASKmskw_YMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_rol_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m128i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Rotate the bits in each packed 64-bit integer in "a" to the left by the number of bits specified in "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE LEFT_ROTATE_QWORDS(src, count_src) {
+ count := count_src % 64
+ RETURN (src &lt;&lt; count) OR (src &gt;&gt; (64 - count))
+}
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := LEFT_ROTATE_QWORDS(a[i+63:i], imm8[7:0])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPROLQ" form="xmm {k}, xmm, imm8" xed="VPROLQ_XMMu64_MASKmskw_XMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_rol_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Rotate the bits in each packed 64-bit integer in "a" to the left by the number of bits specified in "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE LEFT_ROTATE_QWORDS(src, count_src) {
+ count := count_src % 64
+ RETURN (src &lt;&lt; count) OR (src &gt;&gt; (64 - count))
+}
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := LEFT_ROTATE_QWORDS(a[i+63:i], imm8[7:0])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPROLQ" form="xmm {z}, xmm, imm8" xed="VPROLQ_XMMu64_MASKmskw_XMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_rol_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Rotate the bits in each packed 64-bit integer in "a" to the left by the number of bits specified in "imm8", and store the results in "dst".</description>
+ <operation>
+DEFINE LEFT_ROTATE_QWORDS(src, count_src) {
+ count := count_src % 64
+ RETURN (src &lt;&lt; count) OR (src &gt;&gt; (64 - count))
+}
+FOR j := 0 to 1
+ i := j*64
+ dst[i+63:i] := LEFT_ROTATE_QWORDS(a[i+63:i], imm8[7:0])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPROLQ" form="xmm, xmm, imm8" xed="VPROLQ_XMMu64_MASKmskw_XMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_rolv_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="__m256i" varname="b" etype="UI32"/>
+ <description>Rotate the bits in each packed 32-bit integer in "a" to the left by the number of bits specified in the corresponding element of "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE LEFT_ROTATE_DWORDS(src, count_src) {
+ count := count_src % 32
+ RETURN (src &lt;&lt; count) OR (src &gt;&gt; (32 - count))
+}
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := LEFT_ROTATE_DWORDS(a[i+31:i], b[i+31:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPROLVD" form="ymm {k}, ymm, ymm" xed="VPROLVD_YMMu32_MASKmskw_YMMu32_YMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_rolv_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="__m256i" varname="b" etype="UI32"/>
+ <description>Rotate the bits in each packed 32-bit integer in "a" to the left by the number of bits specified in the corresponding element of "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE LEFT_ROTATE_DWORDS(src, count_src) {
+ count := count_src % 32
+ RETURN (src &lt;&lt; count) OR (src &gt;&gt; (32 - count))
+}
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := LEFT_ROTATE_DWORDS(a[i+31:i], b[i+31:i])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPROLVD" form="ymm {z}, ymm, ymm" xed="VPROLVD_YMMu32_MASKmskw_YMMu32_YMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_rolv_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="__m256i" varname="b" etype="UI32"/>
+ <description>Rotate the bits in each packed 32-bit integer in "a" to the left by the number of bits specified in the corresponding element of "b", and store the results in "dst".</description>
+ <operation>
+DEFINE LEFT_ROTATE_DWORDS(src, count_src) {
+ count := count_src % 32
+ RETURN (src &lt;&lt; count) OR (src &gt;&gt; (32 - count))
+}
+FOR j := 0 to 7
+ i := j*32
+ dst[i+31:i] := LEFT_ROTATE_DWORDS(a[i+31:i], b[i+31:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPROLVD" form="ymm, ymm, ymm" xed="VPROLVD_YMMu32_MASKmskw_YMMu32_YMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_rolv_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="b" etype="UI32"/>
+ <description>Rotate the bits in each packed 32-bit integer in "a" to the left by the number of bits specified in the corresponding element of "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE LEFT_ROTATE_DWORDS(src, count_src) {
+ count := count_src % 32
+ RETURN (src &lt;&lt; count) OR (src &gt;&gt; (32 - count))
+}
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := LEFT_ROTATE_DWORDS(a[i+31:i], b[i+31:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPROLVD" form="xmm {k}, xmm, xmm" xed="VPROLVD_XMMu32_MASKmskw_XMMu32_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_rolv_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="b" etype="UI32"/>
+ <description>Rotate the bits in each packed 32-bit integer in "a" to the left by the number of bits specified in the corresponding element of "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE LEFT_ROTATE_DWORDS(src, count_src) {
+ count := count_src % 32
+ RETURN (src &lt;&lt; count) OR (src &gt;&gt; (32 - count))
+}
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := LEFT_ROTATE_DWORDS(a[i+31:i], b[i+31:i])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPROLVD" form="xmm {z}, xmm, xmm" xed="VPROLVD_XMMu32_MASKmskw_XMMu32_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_rolv_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="b" etype="UI32"/>
+ <description>Rotate the bits in each packed 32-bit integer in "a" to the left by the number of bits specified in the corresponding element of "b", and store the results in "dst".</description>
+ <operation>
+DEFINE LEFT_ROTATE_DWORDS(src, count_src) {
+ count := count_src % 32
+ RETURN (src &lt;&lt; count) OR (src &gt;&gt; (32 - count))
+}
+FOR j := 0 to 3
+ i := j*32
+ dst[i+31:i] := LEFT_ROTATE_DWORDS(a[i+31:i], b[i+31:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPROLVD" form="xmm, xmm, xmm" xed="VPROLVD_XMMu32_MASKmskw_XMMu32_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_rolv_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m256i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="__m256i" varname="b" etype="UI64"/>
+ <description>Rotate the bits in each packed 64-bit integer in "a" to the left by the number of bits specified in the corresponding element of "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE LEFT_ROTATE_QWORDS(src, count_src) {
+ count := count_src % 64
+ RETURN (src &lt;&lt; count) OR (src &gt;&gt; (64 - count))
+}
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := LEFT_ROTATE_QWORDS(a[i+63:i], b[i+63:i])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPROLVQ" form="ymm {k}, ymm, ymm" xed="VPROLVQ_YMMu64_MASKmskw_YMMu64_YMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_rolv_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="__m256i" varname="b" etype="UI64"/>
+ <description>Rotate the bits in each packed 64-bit integer in "a" to the left by the number of bits specified in the corresponding element of "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE LEFT_ROTATE_QWORDS(src, count_src) {
+ count := count_src % 64
+ RETURN (src &lt;&lt; count) OR (src &gt;&gt; (64 - count))
+}
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := LEFT_ROTATE_QWORDS(a[i+63:i], b[i+63:i])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPROLVQ" form="ymm {z}, ymm, ymm" xed="VPROLVQ_YMMu64_MASKmskw_YMMu64_YMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_rolv_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="__m256i" varname="b" etype="UI64"/>
+ <description>Rotate the bits in each packed 64-bit integer in "a" to the left by the number of bits specified in the corresponding element of "b", and store the results in "dst".</description>
+ <operation>
+DEFINE LEFT_ROTATE_QWORDS(src, count_src) {
+ count := count_src % 64
+ RETURN (src &lt;&lt; count) OR (src &gt;&gt; (64 - count))
+}
+FOR j := 0 to 3
+ i := j*64
+ dst[i+63:i] := LEFT_ROTATE_QWORDS(a[i+63:i], b[i+63:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPROLVQ" form="ymm, ymm, ymm" xed="VPROLVQ_YMMu64_MASKmskw_YMMu64_YMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_rolv_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m128i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <parameter type="__m128i" varname="b" etype="UI64"/>
+ <description>Rotate the bits in each packed 64-bit integer in "a" to the left by the number of bits specified in the corresponding element of "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE LEFT_ROTATE_QWORDS(src, count_src) {
+ count := count_src % 64
+ RETURN (src &lt;&lt; count) OR (src &gt;&gt; (64 - count))
+}
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := LEFT_ROTATE_QWORDS(a[i+63:i], b[i+63:i])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPROLVQ" form="xmm {k}, xmm, xmm" xed="VPROLVQ_XMMu64_MASKmskw_XMMu64_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_rolv_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <parameter type="__m128i" varname="b" etype="UI64"/>
+ <description>Rotate the bits in each packed 64-bit integer in "a" to the left by the number of bits specified in the corresponding element of "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE LEFT_ROTATE_QWORDS(src, count_src) {
+ count := count_src % 64
+ RETURN (src &lt;&lt; count) OR (src &gt;&gt; (64 - count))
+}
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := LEFT_ROTATE_QWORDS(a[i+63:i], b[i+63:i])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPROLVQ" form="xmm {z}, xmm, xmm" xed="VPROLVQ_XMMu64_MASKmskw_XMMu64_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_rolv_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <parameter type="__m128i" varname="b" etype="UI64"/>
+ <description>Rotate the bits in each packed 64-bit integer in "a" to the left by the number of bits specified in the corresponding element of "b", and store the results in "dst".</description>
+ <operation>
+DEFINE LEFT_ROTATE_QWORDS(src, count_src) {
+ count := count_src % 64
+ RETURN (src &lt;&lt; count) OR (src &gt;&gt; (64 - count))
+}
+FOR j := 0 to 1
+ i := j*64
+ dst[i+63:i] := LEFT_ROTATE_QWORDS(a[i+63:i], b[i+63:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPROLVQ" form="xmm, xmm, xmm" xed="VPROLVQ_XMMu64_MASKmskw_XMMu64_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_ror_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Rotate the bits in each packed 32-bit integer in "a" to the right by the number of bits specified in "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE RIGHT_ROTATE_DWORDS(src, count_src) {
+ count := count_src % 32
+ RETURN (src &gt;&gt;count) OR (src &lt;&lt; (32 - count))
+}
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := RIGHT_ROTATE_DWORDS(a[i+31:i], imm8[7:0])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPRORD" form="ymm {k}, ymm, imm8" xed="VPRORD_YMMu32_MASKmskw_YMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_ror_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Rotate the bits in each packed 32-bit integer in "a" to the right by the number of bits specified in "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE RIGHT_ROTATE_DWORDS(src, count_src) {
+ count := count_src % 32
+ RETURN (src &gt;&gt;count) OR (src &lt;&lt; (32 - count))
+}
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := RIGHT_ROTATE_DWORDS(a[i+31:i], imm8[7:0])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPRORD" form="ymm {z}, ymm, imm8" xed="VPRORD_YMMu32_MASKmskw_YMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_ror_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Rotate the bits in each packed 32-bit integer in "a" to the right by the number of bits specified in "imm8", and store the results in "dst".</description>
+ <operation>
+DEFINE RIGHT_ROTATE_DWORDS(src, count_src) {
+ count := count_src % 32
+ RETURN (src &gt;&gt;count) OR (src &lt;&lt; (32 - count))
+}
+FOR j := 0 to 7
+ i := j*32
+ dst[i+31:i] := RIGHT_ROTATE_DWORDS(a[i+31:i], imm8[7:0])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPRORD" form="ymm, ymm, imm8" xed="VPRORD_YMMu32_MASKmskw_YMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_ror_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Rotate the bits in each packed 32-bit integer in "a" to the right by the number of bits specified in "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE RIGHT_ROTATE_DWORDS(src, count_src) {
+ count := count_src % 32
+ RETURN (src &gt;&gt;count) OR (src &lt;&lt; (32 - count))
+}
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := RIGHT_ROTATE_DWORDS(a[i+31:i], imm8[7:0])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPRORD" form="xmm {k}, xmm, imm8" xed="VPRORD_XMMu32_MASKmskw_XMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_ror_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Rotate the bits in each packed 32-bit integer in "a" to the right by the number of bits specified in "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE RIGHT_ROTATE_DWORDS(src, count_src) {
+ count := count_src % 32
+ RETURN (src &gt;&gt;count) OR (src &lt;&lt; (32 - count))
+}
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := RIGHT_ROTATE_DWORDS(a[i+31:i], imm8[7:0])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPRORD" form="xmm {z}, xmm, imm8" xed="VPRORD_XMMu32_MASKmskw_XMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_ror_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Rotate the bits in each packed 32-bit integer in "a" to the right by the number of bits specified in "imm8", and store the results in "dst".</description>
+ <operation>
+DEFINE RIGHT_ROTATE_DWORDS(src, count_src) {
+ count := count_src % 32
+ RETURN (src &gt;&gt;count) OR (src &lt;&lt; (32 - count))
+}
+FOR j := 0 to 3
+ i := j*32
+ dst[i+31:i] := RIGHT_ROTATE_DWORDS(a[i+31:i], imm8[7:0])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPRORD" form="xmm, xmm, imm8" xed="VPRORD_XMMu32_MASKmskw_XMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_ror_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m256i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Rotate the bits in each packed 64-bit integer in "a" to the right by the number of bits specified in "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE RIGHT_ROTATE_QWORDS(src, count_src) {
+ count := count_src % 64
+ RETURN (src &gt;&gt; count) OR (src &lt;&lt; (64 - count))
+}
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := RIGHT_ROTATE_QWORDS(a[i+63:i], imm8[7:0])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPRORQ" form="ymm {k}, ymm, imm8" xed="VPRORQ_YMMu64_MASKmskw_YMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_ror_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Rotate the bits in each packed 64-bit integer in "a" to the right by the number of bits specified in "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE RIGHT_ROTATE_QWORDS(src, count_src) {
+ count := count_src % 64
+ RETURN (src &gt;&gt; count) OR (src &lt;&lt; (64 - count))
+}
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := RIGHT_ROTATE_QWORDS(a[i+63:i], imm8[7:0])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPRORQ" form="ymm {z}, ymm, imm8" xed="VPRORQ_YMMu64_MASKmskw_YMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_ror_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Rotate the bits in each packed 64-bit integer in "a" to the right by the number of bits specified in "imm8", and store the results in "dst".</description>
+ <operation>
+DEFINE RIGHT_ROTATE_QWORDS(src, count_src) {
+ count := count_src % 64
+ RETURN (src &gt;&gt; count) OR (src &lt;&lt; (64 - count))
+}
+FOR j := 0 to 3
+ i := j*64
+ dst[i+63:i] := RIGHT_ROTATE_QWORDS(a[i+63:i], imm8[7:0])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPRORQ" form="ymm, ymm, imm8" xed="VPRORQ_YMMu64_MASKmskw_YMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_ror_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m128i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Rotate the bits in each packed 64-bit integer in "a" to the right by the number of bits specified in "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE RIGHT_ROTATE_QWORDS(src, count_src) {
+ count := count_src % 64
+ RETURN (src &gt;&gt; count) OR (src &lt;&lt; (64 - count))
+}
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := RIGHT_ROTATE_QWORDS(a[i+63:i], imm8[7:0])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPRORQ" form="xmm {k}, xmm, imm8" xed="VPRORQ_XMMu64_MASKmskw_XMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_ror_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Rotate the bits in each packed 64-bit integer in "a" to the right by the number of bits specified in "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE RIGHT_ROTATE_QWORDS(src, count_src) {
+ count := count_src % 64
+ RETURN (src &gt;&gt; count) OR (src &lt;&lt; (64 - count))
+}
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := RIGHT_ROTATE_QWORDS(a[i+63:i], imm8[7:0])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPRORQ" form="xmm {z}, xmm, imm8" xed="VPRORQ_XMMu64_MASKmskw_XMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_ror_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Rotate the bits in each packed 64-bit integer in "a" to the right by the number of bits specified in "imm8", and store the results in "dst".</description>
+ <operation>
+DEFINE RIGHT_ROTATE_QWORDS(src, count_src) {
+ count := count_src % 64
+ RETURN (src &gt;&gt; count) OR (src &lt;&lt; (64 - count))
+}
+FOR j := 0 to 1
+ i := j*64
+ dst[i+63:i] := RIGHT_ROTATE_QWORDS(a[i+63:i], imm8[7:0])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPRORQ" form="xmm, xmm, imm8" xed="VPRORQ_XMMu64_MASKmskw_XMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_rorv_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="__m256i" varname="b" etype="UI32"/>
+ <description>Rotate the bits in each packed 32-bit integer in "a" to the right by the number of bits specified in the corresponding element of "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE RIGHT_ROTATE_DWORDS(src, count_src) {
+ count := count_src % 32
+ RETURN (src &gt;&gt; count) OR (src &lt;&lt; (32 - count))
+}
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := RIGHT_ROTATE_DWORDS(a[i+31:i], b[i+31:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPRORVD" form="ymm {k}, ymm, ymm" xed="VPRORVD_YMMu32_MASKmskw_YMMu32_YMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_rorv_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="__m256i" varname="b" etype="UI32"/>
+ <description>Rotate the bits in each packed 32-bit integer in "a" to the right by the number of bits specified in the corresponding element of "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE RIGHT_ROTATE_DWORDS(src, count_src) {
+ count := count_src % 32
+ RETURN (src &gt;&gt; count) OR (src &lt;&lt; (32 - count))
+}
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := RIGHT_ROTATE_DWORDS(a[i+31:i], b[i+31:i])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPRORVD" form="ymm {z}, ymm, ymm" xed="VPRORVD_YMMu32_MASKmskw_YMMu32_YMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_rorv_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="__m256i" varname="b" etype="UI32"/>
+ <description>Rotate the bits in each packed 32-bit integer in "a" to the right by the number of bits specified in the corresponding element of "b", and store the results in "dst".</description>
+ <operation>
+DEFINE RIGHT_ROTATE_DWORDS(src, count_src) {
+ count := count_src % 32
+ RETURN (src &gt;&gt; count) OR (src &lt;&lt; (32 - count))
+}
+FOR j := 0 to 7
+ i := j*32
+ dst[i+31:i] := RIGHT_ROTATE_DWORDS(a[i+31:i], b[i+31:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPRORVD" form="ymm, ymm, ymm" xed="VPRORVD_YMMu32_MASKmskw_YMMu32_YMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_rorv_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="b" etype="UI32"/>
+ <description>Rotate the bits in each packed 32-bit integer in "a" to the right by the number of bits specified in the corresponding element of "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE RIGHT_ROTATE_DWORDS(src, count_src) {
+ count := count_src % 32
+ RETURN (src &gt;&gt; count) OR (src &lt;&lt; (32 - count))
+}
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := RIGHT_ROTATE_DWORDS(a[i+31:i], b[i+31:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPRORVD" form="xmm {k}, xmm, xmm" xed="VPRORVD_XMMu32_MASKmskw_XMMu32_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_rorv_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="b" etype="UI32"/>
+ <description>Rotate the bits in each packed 32-bit integer in "a" to the right by the number of bits specified in the corresponding element of "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE RIGHT_ROTATE_DWORDS(src, count_src) {
+ count := count_src % 32
+ RETURN (src &gt;&gt; count) OR (src &lt;&lt; (32 - count))
+}
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := RIGHT_ROTATE_DWORDS(a[i+31:i], b[i+31:i])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPRORVD" form="xmm {z}, xmm, xmm" xed="VPRORVD_XMMu32_MASKmskw_XMMu32_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_rorv_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="b" etype="UI32"/>
+ <description>Rotate the bits in each packed 32-bit integer in "a" to the right by the number of bits specified in the corresponding element of "b", and store the results in "dst".</description>
+ <operation>
+DEFINE RIGHT_ROTATE_DWORDS(src, count_src) {
+ count := count_src % 32
+ RETURN (src &gt;&gt; count) OR (src &lt;&lt; (32 - count))
+}
+FOR j := 0 to 3
+ i := j*32
+ dst[i+31:i] := RIGHT_ROTATE_DWORDS(a[i+31:i], b[i+31:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPRORVD" form="xmm, xmm, xmm" xed="VPRORVD_XMMu32_MASKmskw_XMMu32_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_rorv_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m256i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="__m256i" varname="b" etype="UI64"/>
+ <description>Rotate the bits in each packed 64-bit integer in "a" to the right by the number of bits specified in the corresponding element of "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE RIGHT_ROTATE_QWORDS(src, count_src) {
+ count := count_src % 64
+ RETURN (src &gt;&gt; count) OR (src &lt;&lt; (64 - count))
+}
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := RIGHT_ROTATE_QWORDS(a[i+63:i], b[i+63:i])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPRORVQ" form="ymm {k}, ymm, ymm" xed="VPRORVQ_YMMu64_MASKmskw_YMMu64_YMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_rorv_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="__m256i" varname="b" etype="UI64"/>
+ <description>Rotate the bits in each packed 64-bit integer in "a" to the right by the number of bits specified in the corresponding element of "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE RIGHT_ROTATE_QWORDS(src, count_src) {
+ count := count_src % 64
+ RETURN (src &gt;&gt; count) OR (src &lt;&lt; (64 - count))
+}
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := RIGHT_ROTATE_QWORDS(a[i+63:i], b[i+63:i])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPRORVQ" form="ymm {z}, ymm, ymm" xed="VPRORVQ_YMMu64_MASKmskw_YMMu64_YMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_rorv_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="__m256i" varname="b" etype="UI64"/>
+ <description>Rotate the bits in each packed 64-bit integer in "a" to the right by the number of bits specified in the corresponding element of "b", and store the results in "dst".</description>
+ <operation>
+DEFINE RIGHT_ROTATE_QWORDS(src, count_src) {
+ count := count_src % 64
+ RETURN (src &gt;&gt; count) OR (src &lt;&lt; (64 - count))
+}
+FOR j := 0 to 3
+ i := j*64
+ dst[i+63:i] := RIGHT_ROTATE_QWORDS(a[i+63:i], b[i+63:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPRORVQ" form="ymm, ymm, ymm" xed="VPRORVQ_YMMu64_MASKmskw_YMMu64_YMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_rorv_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m128i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <parameter type="__m128i" varname="b" etype="UI64"/>
+ <description>Rotate the bits in each packed 64-bit integer in "a" to the right by the number of bits specified in the corresponding element of "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE RIGHT_ROTATE_QWORDS(src, count_src) {
+ count := count_src % 64
+ RETURN (src &gt;&gt; count) OR (src &lt;&lt; (64 - count))
+}
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := RIGHT_ROTATE_QWORDS(a[i+63:i], b[i+63:i])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPRORVQ" form="xmm {k}, xmm, xmm" xed="VPRORVQ_XMMu64_MASKmskw_XMMu64_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_rorv_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <parameter type="__m128i" varname="b" etype="UI64"/>
+ <description>Rotate the bits in each packed 64-bit integer in "a" to the right by the number of bits specified in the corresponding element of "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE RIGHT_ROTATE_QWORDS(src, count_src) {
+ count := count_src % 64
+ RETURN (src &gt;&gt; count) OR (src &lt;&lt; (64 - count))
+}
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := RIGHT_ROTATE_QWORDS(a[i+63:i], b[i+63:i])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPRORVQ" form="xmm {z}, xmm, xmm" xed="VPRORVQ_XMMu64_MASKmskw_XMMu64_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_rorv_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <parameter type="__m128i" varname="b" etype="UI64"/>
+ <description>Rotate the bits in each packed 64-bit integer in "a" to the right by the number of bits specified in the corresponding element of "b", and store the results in "dst".</description>
+ <operation>
+DEFINE RIGHT_ROTATE_QWORDS(src, count_src) {
+ count := count_src % 64
+ RETURN (src &gt;&gt; count) OR (src &lt;&lt; (64 - count))
+}
+FOR j := 0 to 1
+ i := j*64
+ dst[i+63:i] := RIGHT_ROTATE_QWORDS(a[i+63:i], b[i+63:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPRORVQ" form="xmm, xmm, xmm" xed="VPRORVQ_XMMu64_MASKmskw_XMMu64_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_i32scatter_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="base_addr" etype="UI32"/>
+ <parameter type="__m256i" varname="vindex" etype="SI32"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="const int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <description>Scatter 32-bit integers from "a" into memory using 32-bit indices. 32-bit elements are stored at addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). "scale" should be 1, 2, 4 or 8.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ m := j*32
+ addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale)
+ MEM[addr+31:addr] := a[i+31:i]
+ENDFOR
+ </operation>
+ <instruction name="VPSCATTERDD" form="vm32y, ymm" xed="VPSCATTERDD_MEMu32_MASKmskw_YMMu32_AVX512_VL256"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_i32scatter_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="base_addr" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="vindex" etype="SI32"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="const int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <description>Scatter 32-bit integers from "a" into memory using 32-bit indices. 32-bit elements are stored at addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale") subject to mask "k" (elements are not stored when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ m := j*32
+ IF k[j]
+  addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale)
+ MEM[addr+31:addr] := a[i+31:i]
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VPSCATTERDD" form="vm32y {k}, ymm" xed="VPSCATTERDD_MEMu32_MASKmskw_YMMu32_AVX512_VL256"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_i32scatter_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="base_addr" etype="UI32"/>
+ <parameter type="__m128i" varname="vindex" etype="SI32"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="const int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <description>Scatter 32-bit integers from "a" into memory using 32-bit indices. 32-bit elements are stored at addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). "scale" should be 1, 2, 4 or 8.</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ m := j*32
+ addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale)
+ MEM[addr+31:addr] := a[i+31:i]
+ENDFOR
+ </operation>
+ <instruction name="VPSCATTERDD" form="vm32x, xmm" xed="VPSCATTERDD_MEMu32_MASKmskw_XMMu32_AVX512_VL128"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_i32scatter_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="base_addr" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="vindex" etype="SI32"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="const int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <description>Scatter 32-bit integers from "a" into memory using 32-bit indices. 32-bit elements are stored at addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale") subject to mask "k" (elements are not stored when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8.</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ m := j*32
+ IF k[j]
+  addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale)
+ MEM[addr+31:addr] := a[i+31:i]
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VPSCATTERDD" form="vm32x {k}, xmm" xed="VPSCATTERDD_MEMu32_MASKmskw_XMMu32_AVX512_VL128"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_i32scatter_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="base_addr" etype="UI64"/>
+ <parameter type="__m128i" varname="vindex" etype="SI32"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="const int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <description>Scatter 64-bit integers from "a" into memory using 32-bit indices. 64-bit elements are stored at addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). "scale" should be 1, 2, 4 or 8.</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ m := j*32
+ addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale)
+ MEM[addr+63:addr] := a[i+63:i]
+ENDFOR
+ </operation>
+ <instruction name="VPSCATTERDQ" form="vm32x, ymm" xed="VPSCATTERDQ_MEMu64_MASKmskw_YMMu64_AVX512_VL256"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_i32scatter_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="base_addr" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="vindex" etype="SI32"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="const int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <description>Scatter 64-bit integers from "a" into memory using 32-bit indices. 64-bit elements are stored at addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale") subject to mask "k" (elements are not stored when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8.</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ m := j*32
+ IF k[j]
+  addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale)
+ MEM[addr+63:addr] := a[i+63:i]
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VPSCATTERDQ" form="vm32x {k}, ymm" xed="VPSCATTERDQ_MEMu64_MASKmskw_YMMu64_AVX512_VL256"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_i32scatter_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="base_addr" etype="UI64"/>
+ <parameter type="__m128i" varname="vindex" etype="SI32"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <parameter type="const int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <description>Scatter 64-bit integers from "a" into memory using 32-bit indices. 64-bit elements are stored at addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). "scale" should be 1, 2, 4 or 8.</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ m := j*32
+ addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale)
+ MEM[addr+63:addr] := a[i+63:i]
+ENDFOR
+ </operation>
+ <instruction name="VPSCATTERDQ" form="vm32x, xmm" xed="VPSCATTERDQ_MEMu64_MASKmskw_XMMu64_AVX512_VL128"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_i32scatter_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="base_addr" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="vindex" etype="SI32"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <parameter type="const int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <description>Scatter 64-bit integers from "a" into memory using 32-bit indices. 64-bit elements are stored at addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale") subject to mask "k" (elements are not stored when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8.</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ m := j*32
+ IF k[j]
+  addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale)
+ MEM[addr+63:addr] := a[i+63:i]
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VPSCATTERDQ" form="vm32x {k}, xmm" xed="VPSCATTERDQ_MEMu64_MASKmskw_XMMu64_AVX512_VL128"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_i64scatter_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="base_addr" etype="UI32"/>
+ <parameter type="__m256i" varname="vindex" etype="SI64"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="const int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <description>Scatter 32-bit integers from "a" into memory using 64-bit indices. 32-bit elements are stored at addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). "scale" should be 1, 2, 4 or 8.</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ m := j*64
+ addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale)
+ MEM[addr+31:addr] := a[i+31:i]
+ENDFOR
+ </operation>
+ <instruction name="VPSCATTERQD" form="vm64y, xmm" xed="VPSCATTERQD_MEMu32_MASKmskw_XMMu32_AVX512_VL128"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_i64scatter_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="base_addr" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="vindex" etype="SI64"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="const int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <description>Scatter 32-bit integers from "a" into memory using 64-bit indices. 32-bit elements are stored at addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale") subject to mask "k" (elements are not stored when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8.</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ m := j*64
+ IF k[j]
+  addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale)
+ MEM[addr+31:addr] := a[i+31:i]
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VPSCATTERQD" form="vm64y {k}, xmm" xed="VPSCATTERQD_MEMu32_MASKmskw_XMMu32_AVX512_VL128"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_i64scatter_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="base_addr" etype="UI32"/>
+ <parameter type="__m128i" varname="vindex" etype="SI64"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="const int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <description>Scatter 32-bit integers from "a" into memory using 64-bit indices. 32-bit elements are stored at addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). "scale" should be 1, 2, 4 or 8.</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*32
+ m := j*64
+ addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale)
+ MEM[addr+31:addr] := a[i+31:i]
+ENDFOR
+ </operation>
+ <instruction name="VPSCATTERQD" form="vm64x, xmm" xed="VPSCATTERQD_MEMu32_MASKmskw_XMMu32_AVX512_VL128"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_i64scatter_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="base_addr" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="vindex" etype="SI64"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="const int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <description>Scatter 32-bit integers from "a" into memory using 64-bit indices. 32-bit elements are stored at addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale") subject to mask "k" (elements are not stored when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8.</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*32
+ m := j*64
+ IF k[j]
+  addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale)
+ MEM[addr+31:addr] := a[i+31:i]
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VPSCATTERQD" form="vm64x {k}, xmm" xed="VPSCATTERQD_MEMu32_MASKmskw_XMMu32_AVX512_VL128"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_i64scatter_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="base_addr" etype="UI64"/>
+ <parameter type="__m256i" varname="vindex" etype="SI64"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="const int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <description>Scatter 64-bit integers from "a" into memory using 64-bit indices. 64-bit elements are stored at addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). "scale" should be 1, 2, 4 or 8.</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ m := j*64
+ addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8
+ MEM[addr+63:addr] := a[i+63:i]
+ENDFOR
+ </operation>
+ <instruction name="VPSCATTERQQ" form="vm64y, ymm" xed="VPSCATTERQQ_MEMu64_MASKmskw_YMMu64_AVX512_VL256"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_i64scatter_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="base_addr" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="vindex" etype="SI64"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="const int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <description>Scatter 64-bit integers from "a" into memory using 64-bit indices. 64-bit elements are stored at addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale") subject to mask "k" (elements are not stored when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8.</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ m := j*64
+ IF k[j]
+ addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8
+ MEM[addr+63:addr] := a[i+63:i]
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VPSCATTERQQ" form="vm64y {k}, ymm" xed="VPSCATTERQQ_MEMu64_MASKmskw_YMMu64_AVX512_VL256"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_i64scatter_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="base_addr" etype="UI64"/>
+ <parameter type="__m128i" varname="vindex" etype="SI64"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <parameter type="const int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <description>Scatter 64-bit integers from "a" into memory using 64-bit indices. 64-bit elements are stored at addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). "scale" should be 1, 2, 4 or 8.</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ m := j*64
+ addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8
+ MEM[addr+63:addr] := a[i+63:i]
+ENDFOR
+ </operation>
+ <instruction name="VPSCATTERQQ" form="vm64x, xmm" xed="VPSCATTERQQ_MEMu64_MASKmskw_XMMu64_AVX512_VL128"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_i64scatter_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="base_addr" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="vindex" etype="SI64"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <parameter type="const int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <description>Scatter 64-bit integers from "a" into memory using 64-bit indices. 64-bit elements are stored at addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale") subject to mask "k" (elements are not stored when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8.</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ m := j*64
+ IF k[j]
+ addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8
+ MEM[addr+63:addr] := a[i+63:i]
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VPSCATTERQQ" form="vm64x {k}, xmm" xed="VPSCATTERQQ_MEMu64_MASKmskw_XMMu64_AVX512_VL128"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_shuffle_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="_MM_PERM_ENUM" varname="imm8" etype="IMM" immtype="_MM_PERM"/>
+ <description>Shuffle 32-bit integers in "a" within 128-bit lanes using the control in "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE SELECT4(src, control) {
+ CASE(control[1:0]) OF
+ 0: tmp[31:0] := src[31:0]
+ 1: tmp[31:0] := src[63:32]
+ 2: tmp[31:0] := src[95:64]
+ 3: tmp[31:0] := src[127:96]
+ ESAC
+ RETURN tmp[31:0]
+}
+tmp_dst[31:0] := SELECT4(a[127:0], imm8[1:0])
+tmp_dst[63:32] := SELECT4(a[127:0], imm8[3:2])
+tmp_dst[95:64] := SELECT4(a[127:0], imm8[5:4])
+tmp_dst[127:96] := SELECT4(a[127:0], imm8[7:6])
+tmp_dst[159:128] := SELECT4(a[255:128], imm8[1:0])
+tmp_dst[191:160] := SELECT4(a[255:128], imm8[3:2])
+tmp_dst[223:192] := SELECT4(a[255:128], imm8[5:4])
+tmp_dst[255:224] := SELECT4(a[255:128], imm8[7:6])
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := tmp_dst[i+31:i]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSHUFD" form="ymm {k}, ymm, imm8" xed="VPSHUFD_YMMu32_MASKmskw_YMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_shuffle_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="_MM_PERM_ENUM" varname="imm8" etype="IMM" immtype="_MM_PERM"/>
+ <description>Shuffle 32-bit integers in "a" within 128-bit lanes using the control in "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE SELECT4(src, control) {
+ CASE(control[1:0]) OF
+ 0: tmp[31:0] := src[31:0]
+ 1: tmp[31:0] := src[63:32]
+ 2: tmp[31:0] := src[95:64]
+ 3: tmp[31:0] := src[127:96]
+ ESAC
+ RETURN tmp[31:0]
+}
+tmp_dst[31:0] := SELECT4(a[127:0], imm8[1:0])
+tmp_dst[63:32] := SELECT4(a[127:0], imm8[3:2])
+tmp_dst[95:64] := SELECT4(a[127:0], imm8[5:4])
+tmp_dst[127:96] := SELECT4(a[127:0], imm8[7:6])
+tmp_dst[159:128] := SELECT4(a[255:128], imm8[1:0])
+tmp_dst[191:160] := SELECT4(a[255:128], imm8[3:2])
+tmp_dst[223:192] := SELECT4(a[255:128], imm8[5:4])
+tmp_dst[255:224] := SELECT4(a[255:128], imm8[7:6])
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := tmp_dst[i+31:i]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSHUFD" form="ymm {z}, ymm, imm8" xed="VPSHUFD_YMMu32_MASKmskw_YMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_shuffle_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="_MM_PERM_ENUM" varname="imm8" etype="IMM" immtype="_MM_PERM"/>
+ <description>Shuffle 32-bit integers in "a" using the control in "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE SELECT4(src, control) {
+ CASE(control[1:0]) OF
+ 0: tmp[31:0] := src[31:0]
+ 1: tmp[31:0] := src[63:32]
+ 2: tmp[31:0] := src[95:64]
+ 3: tmp[31:0] := src[127:96]
+ ESAC
+ RETURN tmp[31:0]
+}
+tmp_dst[31:0] := SELECT4(a[127:0], imm8[1:0])
+tmp_dst[63:32] := SELECT4(a[127:0], imm8[3:2])
+tmp_dst[95:64] := SELECT4(a[127:0], imm8[5:4])
+tmp_dst[127:96] := SELECT4(a[127:0], imm8[7:6])
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := tmp_dst[i+31:i]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPSHUFD" form="xmm {k}, xmm, imm8" xed="VPSHUFD_XMMu32_MASKmskw_XMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_shuffle_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="_MM_PERM_ENUM" varname="imm8" etype="IMM" immtype="_MM_PERM"/>
+ <description>Shuffle 32-bit integers in "a" using the control in "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE SELECT4(src, control) {
+ CASE(control[1:0]) OF
+ 0: tmp[31:0] := src[31:0]
+ 1: tmp[31:0] := src[63:32]
+ 2: tmp[31:0] := src[95:64]
+ 3: tmp[31:0] := src[127:96]
+ ESAC
+ RETURN tmp[31:0]
+}
+tmp_dst[31:0] := SELECT4(a[127:0], imm8[1:0])
+tmp_dst[63:32] := SELECT4(a[127:0], imm8[3:2])
+tmp_dst[95:64] := SELECT4(a[127:0], imm8[5:4])
+tmp_dst[127:96] := SELECT4(a[127:0], imm8[7:6])
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := tmp_dst[i+31:i]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPSHUFD" form="xmm {z}, xmm, imm8" xed="VPSHUFD_XMMu32_MASKmskw_XMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_sll_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="count" etype="UI32"/>
+ <description>Shift packed 32-bit integers in "a" left by "count" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ IF count[63:0] &gt; 31
+ dst[i+31:i] := 0
+ ELSE
+ dst[i+31:i] := ZeroExtend32(a[i+31:i] &lt;&lt; count[63:0])
+ FI
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSLLD" form="ymm {k}, ymm, xmm" xed="VPSLLD_YMMu32_MASKmskw_YMMu32_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_slli_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="unsigned int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shift packed 32-bit integers in "a" left by "imm8" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ IF imm8[7:0] &gt; 31
+ dst[i+31:i] := 0
+ ELSE
+ dst[i+31:i] := ZeroExtend32(a[i+31:i] &lt;&lt; imm8[7:0])
+ FI
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSLLD" form="ymm {k}, ymm, imm8" xed="VPSLLD_YMMu32_MASKmskw_YMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_sll_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="count" etype="UI32"/>
+ <description>Shift packed 32-bit integers in "a" left by "count" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ IF count[63:0] &gt; 31
+ dst[i+31:i] := 0
+ ELSE
+ dst[i+31:i] := ZeroExtend32(a[i+31:i] &lt;&lt; count[63:0])
+ FI
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSLLD" form="ymm {z}, ymm, xmm" xed="VPSLLD_YMMu32_MASKmskw_YMMu32_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_slli_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="unsigned int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shift packed 32-bit integers in "a" left by "imm8" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ IF imm8[7:0] &gt; 31
+ dst[i+31:i] := 0
+ ELSE
+ dst[i+31:i] := ZeroExtend32(a[i+31:i] &lt;&lt; imm8[7:0])
+ FI
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSLLD" form="ymm {z}, ymm, imm8" xed="VPSLLD_YMMu32_MASKmskw_YMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_sll_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="count" etype="UI32"/>
+ <description>Shift packed 32-bit integers in "a" left by "count" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ IF count[63:0] &gt; 31
+ dst[i+31:i] := 0
+ ELSE
+ dst[i+31:i] := ZeroExtend32(a[i+31:i] &lt;&lt; count[63:0])
+ FI
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPSLLD" form="xmm {k}, xmm, xmm" xed="VPSLLD_XMMu32_MASKmskw_XMMu32_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_slli_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="unsigned int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shift packed 32-bit integers in "a" left by "imm8" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ IF imm8[7:0] &gt; 31
+ dst[i+31:i] := 0
+ ELSE
+ dst[i+31:i] := ZeroExtend32(a[i+31:i] &lt;&lt; imm8[7:0])
+ FI
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPSLLD" form="xmm {k}, xmm, imm8" xed="VPSLLD_XMMu32_MASKmskw_XMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_sll_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="count" etype="UI32"/>
+ <description>Shift packed 32-bit integers in "a" left by "count" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ IF count[63:0] &gt; 31
+ dst[i+31:i] := 0
+ ELSE
+ dst[i+31:i] := ZeroExtend32(a[i+31:i] &lt;&lt; count[63:0])
+ FI
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPSLLD" form="xmm {z}, xmm, xmm" xed="VPSLLD_XMMu32_MASKmskw_XMMu32_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_slli_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="unsigned int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shift packed 32-bit integers in "a" left by "imm8" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ IF imm8[7:0] &gt; 31
+ dst[i+31:i] := 0
+ ELSE
+ dst[i+31:i] := ZeroExtend32(a[i+31:i] &lt;&lt; imm8[7:0])
+ FI
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPSLLD" form="xmm {z}, xmm, imm8" xed="VPSLLD_XMMu32_MASKmskw_XMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_sll_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m256i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="__m128i" varname="count" etype="UI64"/>
+ <description>Shift packed 64-bit integers in "a" left by "count" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ IF count[63:0] &gt; 63
+ dst[i+63:i] := 0
+ ELSE
+ dst[i+63:i] := ZeroExtend64(a[i+63:i] &lt;&lt; count[63:0])
+ FI
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSLLQ" form="ymm {k}, ymm, xmm" xed="VPSLLQ_YMMu64_MASKmskw_YMMu64_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_slli_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m256i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="unsigned int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shift packed 64-bit integers in "a" left by "imm8" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ IF imm8[7:0] &gt; 63
+ dst[i+63:i] := 0
+ ELSE
+ dst[i+63:i] := ZeroExtend64(a[i+63:i] &lt;&lt; imm8[7:0])
+ FI
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSLLQ" form="ymm {k}, ymm, imm8" xed="VPSLLQ_YMMu64_MASKmskw_YMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_sll_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="__m128i" varname="count" etype="UI64"/>
+ <description>Shift packed 64-bit integers in "a" left by "count" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ IF count[63:0] &gt; 63
+ dst[i+63:i] := 0
+ ELSE
+ dst[i+63:i] := ZeroExtend64(a[i+63:i] &lt;&lt; count[63:0])
+ FI
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSLLQ" form="ymm {z}, ymm, xmm" xed="VPSLLQ_YMMu64_MASKmskw_YMMu64_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_slli_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="unsigned int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shift packed 64-bit integers in "a" left by "imm8" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ IF imm8[7:0] &gt; 63
+ dst[i+63:i] := 0
+ ELSE
+ dst[i+63:i] := ZeroExtend64(a[i+63:i] &lt;&lt; imm8[7:0])
+ FI
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSLLQ" form="ymm {z}, ymm, imm8" xed="VPSLLQ_YMMu64_MASKmskw_YMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_sll_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m128i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <parameter type="__m128i" varname="count" etype="UI64"/>
+ <description>Shift packed 64-bit integers in "a" left by "count" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ IF count[63:0] &gt; 63
+ dst[i+63:i] := 0
+ ELSE
+ dst[i+63:i] := ZeroExtend64(a[i+63:i] &lt;&lt; count[63:0])
+ FI
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPSLLQ" form="xmm {k}, xmm, xmm" xed="VPSLLQ_XMMu64_MASKmskw_XMMu64_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_slli_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m128i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <parameter type="unsigned int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shift packed 64-bit integers in "a" left by "imm8" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ IF imm8[7:0] &gt; 63
+ dst[i+63:i] := 0
+ ELSE
+ dst[i+63:i] := ZeroExtend64(a[i+63:i] &lt;&lt; imm8[7:0])
+ FI
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPSLLQ" form="xmm {k}, xmm, imm8" xed="VPSLLQ_XMMu64_MASKmskw_XMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_sll_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <parameter type="__m128i" varname="count" etype="UI64"/>
+ <description>Shift packed 64-bit integers in "a" left by "count" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ IF count[63:0] &gt; 63
+ dst[i+63:i] := 0
+ ELSE
+ dst[i+63:i] := ZeroExtend64(a[i+63:i] &lt;&lt; count[63:0])
+ FI
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPSLLQ" form="xmm {z}, xmm, xmm" xed="VPSLLQ_XMMu64_MASKmskw_XMMu64_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_slli_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <parameter type="unsigned int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shift packed 64-bit integers in "a" left by "imm8" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ IF imm8[7:0] &gt; 63
+ dst[i+63:i] := 0
+ ELSE
+ dst[i+63:i] := ZeroExtend64(a[i+63:i] &lt;&lt; imm8[7:0])
+ FI
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPSLLQ" form="xmm {z}, xmm, imm8" xed="VPSLLQ_XMMu64_MASKmskw_XMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_sllv_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="__m256i" varname="count" etype="UI32"/>
+ <description>Shift packed 32-bit integers in "a" left by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ IF count[i+31:i] &lt; 32
+ dst[i+31:i] := ZeroExtend32(a[i+31:i] &lt;&lt; count[i+31:i])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSLLVD" form="ymm {k}, ymm, ymm" xed="VPSLLVD_YMMu32_MASKmskw_YMMu32_YMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_sllv_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="__m256i" varname="count" etype="UI32"/>
+ <description>Shift packed 32-bit integers in "a" left by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ IF count[i+31:i] &lt; 32
+ dst[i+31:i] := ZeroExtend32(a[i+31:i] &lt;&lt; count[i+31:i])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSLLVD" form="ymm {z}, ymm, ymm" xed="VPSLLVD_YMMu32_MASKmskw_YMMu32_YMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_sllv_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="count" etype="UI32"/>
+ <description>Shift packed 32-bit integers in "a" left by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ IF count[i+31:i] &lt; 32
+ dst[i+31:i] := ZeroExtend32(a[i+31:i] &lt;&lt; count[i+31:i])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPSLLVD" form="xmm {k}, xmm, xmm" xed="VPSLLVD_XMMu32_MASKmskw_XMMu32_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_sllv_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="count" etype="UI32"/>
+ <description>Shift packed 32-bit integers in "a" left by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ IF count[i+31:i] &lt; 32
+ dst[i+31:i] := ZeroExtend32(a[i+31:i] &lt;&lt; count[i+31:i])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPSLLVD" form="xmm {z}, xmm, xmm" xed="VPSLLVD_XMMu32_MASKmskw_XMMu32_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_sllv_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m256i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="__m256i" varname="count" etype="UI64"/>
+ <description>Shift packed 64-bit integers in "a" left by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ IF count[i+63:i] &lt; 64
+ dst[i+63:i] := ZeroExtend64(a[i+63:i] &lt;&lt; count[i+63:i])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSLLVQ" form="ymm {k}, ymm, ymm" xed="VPSLLVQ_YMMu64_MASKmskw_YMMu64_YMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_sllv_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="__m256i" varname="count" etype="UI64"/>
+ <description>Shift packed 64-bit integers in "a" left by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ IF count[i+63:i] &lt; 64
+ dst[i+63:i] := ZeroExtend64(a[i+63:i] &lt;&lt; count[i+63:i])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSLLVQ" form="ymm {z}, ymm, ymm" xed="VPSLLVQ_YMMu64_MASKmskw_YMMu64_YMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_sllv_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m128i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <parameter type="__m128i" varname="count" etype="UI64"/>
+ <description>Shift packed 64-bit integers in "a" left by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ IF count[i+63:i] &lt; 64
+ dst[i+63:i] := ZeroExtend64(a[i+63:i] &lt;&lt; count[i+63:i])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPSLLVQ" form="xmm {k}, xmm, xmm" xed="VPSLLVQ_XMMu64_MASKmskw_XMMu64_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_sllv_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <parameter type="__m128i" varname="count" etype="UI64"/>
+ <description>Shift packed 64-bit integers in "a" left by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ IF count[i+63:i] &lt; 64
+ dst[i+63:i] := ZeroExtend64(a[i+63:i] &lt;&lt; count[i+63:i])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPSLLVQ" form="xmm {z}, xmm, xmm" xed="VPSLLVQ_XMMu64_MASKmskw_XMMu64_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_sra_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="count" etype="UI32"/>
+ <description>Shift packed 32-bit integers in "a" right by "count" while shifting in sign bits, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ IF count[63:0] &gt; 31
+ dst[i+31:i] := (a[i+31] ? 0xFFFFFFFF : 0x0)
+ ELSE
+ dst[i+31:i] := SignExtend32(a[i+31:i] &gt;&gt; count[63:0])
+ FI
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSRAD" form="ymm {k}, ymm, xmm" xed="VPSRAD_YMMu32_MASKmskw_YMMu32_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_srai_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI32"/>
+ <parameter type="unsigned int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shift packed 32-bit integers in "a" right by "imm8" while shifting in sign bits, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ IF imm8[7:0] &gt; 31
+ dst[i+31:i] := (a[i+31] ? 0xFFFFFFFF : 0x0)
+ ELSE
+ dst[i+31:i] := SignExtend32(a[i+31:i] &gt;&gt; imm8[7:0])
+ FI
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSRAD" form="ymm {k}, ymm, imm8" xed="VPSRAD_YMMu32_MASKmskw_YMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_sra_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="count" etype="UI32"/>
+ <description>Shift packed 32-bit integers in "a" right by "count" while shifting in sign bits, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ IF count[63:0] &gt; 31
+ dst[i+31:i] := (a[i+31] ? 0xFFFFFFFF : 0x0)
+ ELSE
+ dst[i+31:i] := SignExtend32(a[i+31:i] &gt;&gt; count[63:0])
+ FI
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSRAD" form="ymm {z}, ymm, xmm" xed="VPSRAD_YMMu32_MASKmskw_YMMu32_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_srai_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI32"/>
+ <parameter type="unsigned int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shift packed 32-bit integers in "a" right by "imm8" while shifting in sign bits, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ IF imm8[7:0] &gt; 31
+ dst[i+31:i] := (a[i+31] ? 0xFFFFFFFF : 0x0)
+ ELSE
+ dst[i+31:i] := SignExtend32(a[i+31:i] &gt;&gt; imm8[7:0])
+ FI
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSRAD" form="ymm {z}, ymm, imm8" xed="VPSRAD_YMMu32_MASKmskw_YMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_sra_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="count" etype="UI32"/>
+ <description>Shift packed 32-bit integers in "a" right by "count" while shifting in sign bits, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ IF count[63:0] &gt; 31
+ dst[i+31:i] := (a[i+31] ? 0xFFFFFFFF : 0x0)
+ ELSE
+ dst[i+31:i] := SignExtend32(a[i+31:i] &gt;&gt; count[63:0])
+ FI
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPSRAD" form="xmm {k}, xmm, xmm" xed="VPSRAD_XMMu32_MASKmskw_XMMu32_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_srai_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI32"/>
+ <parameter type="unsigned int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shift packed 32-bit integers in "a" right by "imm8" while shifting in sign bits, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ IF imm8[7:0] &gt; 31
+ dst[i+31:i] := (a[i+31] ? 0xFFFFFFFF : 0x0)
+ ELSE
+ dst[i+31:i] := SignExtend32(a[i+31:i] &gt;&gt; imm8[7:0])
+ FI
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPSRAD" form="xmm {k}, xmm, imm8" xed="VPSRAD_XMMu32_MASKmskw_XMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_sra_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="count" etype="UI32"/>
+ <description>Shift packed 32-bit integers in "a" right by "count" while shifting in sign bits, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ IF count[63:0] &gt; 31
+ dst[i+31:i] := (a[i+31] ? 0xFFFFFFFF : 0x0)
+ ELSE
+ dst[i+31:i] := SignExtend32(a[i+31:i] &gt;&gt; count[63:0])
+ FI
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPSRAD" form="xmm {z}, xmm, xmm" xed="VPSRAD_XMMu32_MASKmskw_XMMu32_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_srai_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI32"/>
+ <parameter type="unsigned int" varname="imm8" etype="IMM" immwidth="6"/>
+ <description>Shift packed 32-bit integers in "a" right by "imm8" while shifting in sign bits, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ IF imm8[7:0] &gt; 31
+ dst[i+31:i] := (a[i+31] ? 0xFFFFFFFF : 0x0)
+ ELSE
+ dst[i+31:i] := SignExtend32(a[i+31:i] &gt;&gt; imm8[7:0])
+ FI
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPSRAD" form="xmm {z}, xmm, imm8" xed="VPSRAD_XMMu32_MASKmskw_XMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_sra_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m256i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="__m128i" varname="count" etype="UI64"/>
+ <description>Shift packed 64-bit integers in "a" right by "count" while shifting in sign bits, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ IF count[63:0] &gt; 63
+ dst[i+63:i] := (a[i+63] ? 0xFFFFFFFFFFFFFFFF : 0x0)
+ ELSE
+ dst[i+63:i] := SignExtend64(a[i+63:i] &gt;&gt; count[63:0])
+ FI
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSRAQ" form="ymm {k}, ymm, xmm" xed="VPSRAQ_YMMu64_MASKmskw_YMMu64_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_srai_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m256i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI64"/>
+ <parameter type="unsigned int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shift packed 64-bit integers in "a" right by "imm8" while shifting in sign bits, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ IF imm8[7:0] &gt; 63
+ dst[i+63:i] := (a[i+63] ? 0xFFFFFFFFFFFFFFFF : 0x0)
+ ELSE
+ dst[i+63:i] := SignExtend64(a[i+63:i] &gt;&gt; imm8[7:0])
+ FI
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSRAQ" form="ymm {k}, ymm, imm8" xed="VPSRAQ_YMMu64_MASKmskw_YMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_sra_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="__m128i" varname="count" etype="UI64"/>
+ <description>Shift packed 64-bit integers in "a" right by "count" while shifting in sign bits, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ IF count[63:0] &gt; 63
+ dst[i+63:i] := (a[i+63] ? 0xFFFFFFFFFFFFFFFF : 0x0)
+ ELSE
+ dst[i+63:i] := SignExtend64(a[i+63:i] &gt;&gt; count[63:0])
+ FI
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSRAQ" form="ymm {z}, ymm, xmm" xed="VPSRAQ_YMMu64_MASKmskw_YMMu64_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_srai_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI64"/>
+ <parameter type="unsigned int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shift packed 64-bit integers in "a" right by "imm8" while shifting in sign bits, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ IF imm8[7:0] &gt; 63
+ dst[i+63:i] := (a[i+63] ? 0xFFFFFFFFFFFFFFFF : 0x0)
+ ELSE
+ dst[i+63:i] := SignExtend64(a[i+63:i] &gt;&gt; imm8[7:0])
+ FI
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSRAQ" form="ymm {z}, ymm, imm8" xed="VPSRAQ_YMMu64_MASKmskw_YMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_sra_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="__m128i" varname="count" etype="UI64"/>
+ <description>Shift packed 64-bit integers in "a" right by "count" while shifting in sign bits, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF count[63:0] &gt; 63
+ dst[i+63:i] := (a[i+63] ? 0xFFFFFFFFFFFFFFFF : 0x0)
+ ELSE
+ dst[i+63:i] := SignExtend64(a[i+63:i] &gt;&gt; count[63:0])
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSRAQ" form="ymm, ymm, xmm" xed="VPSRAQ_YMMu64_MASKmskw_YMMu64_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_srai_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m256i" varname="a" etype="SI64"/>
+ <parameter type="unsigned int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shift packed 64-bit integers in "a" right by "imm8" while shifting in sign bits, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF imm8[7:0] &gt; 63
+ dst[i+63:i] := (a[i+63] ? 0xFFFFFFFFFFFFFFFF : 0x0)
+ ELSE
+ dst[i+63:i] := SignExtend64(a[i+63:i] &gt;&gt; imm8[7:0])
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSRAQ" form="ymm, ymm, imm8" xed="VPSRAQ_YMMu64_MASKmskw_YMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_sra_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m128i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <parameter type="__m128i" varname="count" etype="UI64"/>
+ <description>Shift packed 64-bit integers in "a" right by "count" while shifting in sign bits, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ IF count[63:0] &gt; 63
+ dst[i+63:i] := (a[i+63] ? 0xFFFFFFFFFFFFFFFF : 0x0)
+ ELSE
+ dst[i+63:i] := SignExtend64(a[i+63:i] &gt;&gt; count[63:0])
+ FI
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPSRAQ" form="xmm {k}, xmm, xmm" xed="VPSRAQ_XMMu64_MASKmskw_XMMu64_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_srai_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m128i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI64"/>
+ <parameter type="unsigned int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shift packed 64-bit integers in "a" right by "imm8" while shifting in sign bits, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ IF imm8[7:0] &gt; 63
+ dst[i+63:i] := (a[i+63] ? 0xFFFFFFFFFFFFFFFF : 0x0)
+ ELSE
+ dst[i+63:i] := SignExtend64(a[i+63:i] &gt;&gt; imm8[7:0])
+ FI
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPSRAQ" form="xmm {k}, xmm, imm8" xed="VPSRAQ_XMMu64_MASKmskw_XMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_sra_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <parameter type="__m128i" varname="count" etype="UI64"/>
+ <description>Shift packed 64-bit integers in "a" right by "count" while shifting in sign bits, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ IF count[63:0] &gt; 63
+ dst[i+63:i] := (a[i+63] ? 0xFFFFFFFFFFFFFFFF : 0x0)
+ ELSE
+ dst[i+63:i] := SignExtend64(a[i+63:i] &gt;&gt; count[63:0])
+ FI
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPSRAQ" form="xmm {z}, xmm, xmm" xed="VPSRAQ_XMMu64_MASKmskw_XMMu64_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_srai_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI64"/>
+ <parameter type="unsigned int" varname="imm8" etype="IMM" immwidth="7"/>
+ <description>Shift packed 64-bit integers in "a" right by "imm8" while shifting in sign bits, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ IF imm8[7:0] &gt; 63
+ dst[i+63:i] := (a[i+63] ? 0xFFFFFFFFFFFFFFFF : 0x0)
+ ELSE
+ dst[i+63:i] := SignExtend64(a[i+63:i] &gt;&gt; imm8[7:0])
+ FI
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPSRAQ" form="xmm {z}, xmm, imm8" xed="VPSRAQ_XMMu64_MASKmskw_XMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_sra_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <parameter type="__m128i" varname="count" etype="UI64"/>
+ <description>Shift packed 64-bit integers in "a" right by "count" while shifting in sign bits, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF count[63:0] &gt; 63
+ dst[i+63:i] := (a[i+63] ? 0xFFFFFFFFFFFFFFFF : 0x0)
+ ELSE
+ dst[i+63:i] := SignExtend64(a[i+63:i] &gt;&gt; count[63:0])
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPSRAQ" form="xmm, xmm, xmm" xed="VPSRAQ_XMMu64_MASKmskw_XMMu64_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_srai_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m128i" varname="a" etype="SI64"/>
+ <parameter type="unsigned int" varname="imm8" etype="IMM" immwidth="7"/>
+ <description>Shift packed 64-bit integers in "a" right by "imm8" while shifting in sign bits, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF imm8[7:0] &gt; 63
+ dst[i+63:i] := (a[i+63] ? 0xFFFFFFFFFFFFFFFF : 0x0)
+ ELSE
+ dst[i+63:i] := SignExtend64(a[i+63:i] &gt;&gt; imm8[7:0])
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPSRAQ" form="xmm, xmm, imm8" xed="VPSRAQ_XMMu64_MASKmskw_XMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_srav_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI32"/>
+ <parameter type="__m256i" varname="count" etype="UI32"/>
+ <description>Shift packed 32-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in sign bits, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ IF count[i+31:i] &lt; 32
+ dst[i+31:i] := SignExtend32(a[i+31:i] &gt;&gt; count[i+31:i])
+ ELSE
+ dst[i+31:i] := (a[i+31] ? 0xFFFFFFFF : 0)
+ FI
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSRAVD" form="ymm {k}, ymm, ymm" xed="VPSRAVD_YMMu32_MASKmskw_YMMu32_YMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_srav_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI32"/>
+ <parameter type="__m256i" varname="count" etype="UI32"/>
+ <description>Shift packed 32-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in sign bits, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ IF count[i+31:i] &lt; 32
+ dst[i+31:i] := SignExtend32(a[i+31:i] &gt;&gt; count[i+31:i])
+ ELSE
+ dst[i+31:i] := (a[i+31] ? 0xFFFFFFFF : 0)
+ FI
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSRAVD" form="ymm {z}, ymm, ymm" xed="VPSRAVD_YMMu32_MASKmskw_YMMu32_YMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_srav_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI32"/>
+ <parameter type="__m128i" varname="count" etype="UI32"/>
+ <description>Shift packed 32-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in sign bits, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ IF count[i+31:i] &lt; 32
+ dst[i+31:i] := SignExtend32(a[i+31:i] &gt;&gt; count[i+31:i])
+ ELSE
+ dst[i+31:i] := (a[i+31] ? 0xFFFFFFFF : 0)
+ FI
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPSRAVD" form="xmm {k}, xmm, xmm" xed="VPSRAVD_XMMu32_MASKmskw_XMMu32_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_srav_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI32"/>
+ <parameter type="__m128i" varname="count" etype="UI32"/>
+ <description>Shift packed 32-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in sign bits, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ IF count[i+31:i] &lt; 32
+ dst[i+31:i] := SignExtend32(a[i+31:i] &gt;&gt; count[i+31:i])
+ ELSE
+ dst[i+31:i] := (a[i+31] ? 0xFFFFFFFF : 0)
+ FI
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPSRAVD" form="xmm {z}, xmm, xmm" xed="VPSRAVD_XMMu32_MASKmskw_XMMu32_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_srav_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m256i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI64"/>
+ <parameter type="__m256i" varname="count" etype="UI64"/>
+ <description>Shift packed 64-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in sign bits, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ IF count[i+63:i] &lt; 64
+ dst[i+63:i] := SignExtend64(a[i+63:i] &gt;&gt; count[i+63:i])
+ ELSE
+ dst[i+63:i] := (a[i+63] ? 0xFFFFFFFFFFFFFFFF : 0)
+ FI
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSRAVQ" form="ymm {k}, ymm, ymm" xed="VPSRAVQ_YMMu64_MASKmskw_YMMu64_YMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_srav_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI64"/>
+ <parameter type="__m256i" varname="count" etype="UI64"/>
+ <description>Shift packed 64-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in sign bits, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ IF count[i+63:i] &lt; 64
+ dst[i+63:i] := SignExtend64(a[i+63:i] &gt;&gt; count[i+63:i])
+ ELSE
+ dst[i+63:i] := (a[i+63] ? 0xFFFFFFFFFFFFFFFF : 0)
+ FI
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSRAVQ" form="ymm {z}, ymm, ymm" xed="VPSRAVQ_YMMu64_MASKmskw_YMMu64_YMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_srav_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m256i" varname="a" etype="SI64"/>
+ <parameter type="__m256i" varname="count" etype="UI64"/>
+ <description>Shift packed 64-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in sign bits, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF count[i+63:i] &lt; 64
+ dst[i+63:i] := SignExtend64(a[i+63:i] &gt;&gt; count[i+63:i])
+ ELSE
+ dst[i+63:i] := (a[i+63] ? 0xFFFFFFFFFFFFFFFF : 0)
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSRAVQ" form="ymm, ymm, ymm" xed="VPSRAVQ_YMMu64_MASKmskw_YMMu64_YMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_srav_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m128i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI64"/>
+ <parameter type="__m128i" varname="count" etype="UI64"/>
+ <description>Shift packed 64-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in sign bits, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ IF count[i+63:i] &lt; 64
+ dst[i+63:i] := SignExtend64(a[i+63:i] &gt;&gt; count[i+63:i])
+ ELSE
+ dst[i+63:i] := (a[i+63] ? 0xFFFFFFFFFFFFFFFF : 0)
+ FI
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPSRAVQ" form="xmm {k}, xmm, xmm" xed="VPSRAVQ_XMMu64_MASKmskw_XMMu64_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_srav_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI64"/>
+ <parameter type="__m128i" varname="count" etype="UI64"/>
+ <description>Shift packed 64-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in sign bits, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ IF count[i+63:i] &lt; 64
+ dst[i+63:i] := SignExtend64(a[i+63:i] &gt;&gt; count[i+63:i])
+ ELSE
+ dst[i+63:i] := (a[i+63] ? 0xFFFFFFFFFFFFFFFF : 0)
+ FI
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPSRAVQ" form="xmm {z}, xmm, xmm" xed="VPSRAVQ_XMMu64_MASKmskw_XMMu64_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_srav_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m128i" varname="a" etype="SI64"/>
+ <parameter type="__m128i" varname="count" etype="UI64"/>
+ <description>Shift packed 64-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in sign bits, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF count[i+63:i] &lt; 64
+ dst[i+63:i] := SignExtend64(a[i+63:i] &gt;&gt; count[i+63:i])
+ ELSE
+ dst[i+63:i] := (a[i+63] ? 0xFFFFFFFFFFFFFFFF : 0)
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPSRAVQ" form="xmm, xmm, xmm" xed="VPSRAVQ_XMMu64_MASKmskw_XMMu64_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_srl_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="count" etype="UI32"/>
+ <description>Shift packed 32-bit integers in "a" right by "count" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ IF count[63:0] &gt; 31
+ dst[i+31:i] := 0
+ ELSE
+ dst[i+31:i] := ZeroExtend32(a[i+31:i] &gt;&gt; count[63:0])
+ FI
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSRLD" form="ymm {k}, ymm, xmm" xed="VPSRLD_YMMu32_MASKmskw_YMMu32_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_srli_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="unsigned int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shift packed 32-bit integers in "a" right by "imm8" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ IF imm8[7:0] &gt; 31
+ dst[i+31:i] := 0
+ ELSE
+ dst[i+31:i] := ZeroExtend32(a[i+31:i] &gt;&gt; imm8[7:0])
+ FI
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSRLD" form="ymm {k}, ymm, imm8" xed="VPSRLD_YMMu32_MASKmskw_YMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_srl_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="count" etype="UI32"/>
+ <description>Shift packed 32-bit integers in "a" right by "count" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ IF count[63:0] &gt; 31
+ dst[i+31:i] := 0
+ ELSE
+ dst[i+31:i] := ZeroExtend32(a[i+31:i] &gt;&gt; count[63:0])
+ FI
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSRLD" form="ymm {z}, ymm, xmm" xed="VPSRLD_YMMu32_MASKmskw_YMMu32_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_srli_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="unsigned int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shift packed 32-bit integers in "a" right by "imm8" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ IF imm8[7:0] &gt; 31
+ dst[i+31:i] := 0
+ ELSE
+ dst[i+31:i] := ZeroExtend32(a[i+31:i] &gt;&gt; imm8[7:0])
+ FI
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSRLD" form="ymm {z}, ymm, imm8" xed="VPSRLD_YMMu32_MASKmskw_YMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_srl_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="count" etype="UI32"/>
+ <description>Shift packed 32-bit integers in "a" right by "count" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ IF count[63:0] &gt; 31
+ dst[i+31:i] := 0
+ ELSE
+ dst[i+31:i] := ZeroExtend32(a[i+31:i] &gt;&gt; count[63:0])
+ FI
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPSRLD" form="xmm {k}, xmm, xmm" xed="VPSRLD_XMMu32_MASKmskw_XMMu32_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_srli_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="unsigned int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shift packed 32-bit integers in "a" right by "imm8" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ IF imm8[7:0] &gt; 31
+ dst[i+31:i] := 0
+ ELSE
+ dst[i+31:i] := ZeroExtend32(a[i+31:i] &gt;&gt; imm8[7:0])
+ FI
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPSRLD" form="xmm {k}, xmm, imm8" xed="VPSRLD_XMMu32_MASKmskw_XMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_srl_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="count" etype="UI32"/>
+ <description>Shift packed 32-bit integers in "a" right by "count" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ IF count[63:0] &gt; 31
+ dst[i+31:i] := 0
+ ELSE
+ dst[i+31:i] := ZeroExtend32(a[i+31:i] &gt;&gt; count[63:0])
+ FI
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPSRLD" form="xmm {z}, xmm, xmm" xed="VPSRLD_XMMu32_MASKmskw_XMMu32_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_srli_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="unsigned int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shift packed 32-bit integers in "a" right by "imm8" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ IF imm8[7:0] &gt; 31
+ dst[i+31:i] := 0
+ ELSE
+ dst[i+31:i] := ZeroExtend32(a[i+31:i] &gt;&gt; imm8[7:0])
+ FI
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPSRLD" form="xmm {z}, xmm, imm8" xed="VPSRLD_XMMu32_MASKmskw_XMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_srl_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m256i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="__m128i" varname="count" etype="UI64"/>
+ <description>Shift packed 64-bit integers in "a" right by "count" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ IF count[63:0] &gt; 63
+ dst[i+63:i] := 0
+ ELSE
+ dst[i+63:i] := ZeroExtend64(a[i+63:i] &gt;&gt; count[63:0])
+ FI
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSRLQ" form="ymm {k}, ymm, xmm" xed="VPSRLQ_YMMu64_MASKmskw_YMMu64_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_srli_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m256i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="unsigned int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shift packed 64-bit integers in "a" right by "imm8" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ IF imm8[7:0] &gt; 63
+ dst[i+63:i] := 0
+ ELSE
+ dst[i+63:i] := ZeroExtend64(a[i+63:i] &gt;&gt; imm8[7:0])
+ FI
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSRLQ" form="ymm {k}, ymm, imm8" xed="VPSRLQ_YMMu64_MASKmskw_YMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_srl_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="__m128i" varname="count" etype="UI64"/>
+ <description>Shift packed 64-bit integers in "a" right by "count" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ IF count[63:0] &gt; 63
+ dst[i+63:i] := 0
+ ELSE
+ dst[i+63:i] := ZeroExtend64(a[i+63:i] &gt;&gt; count[63:0])
+ FI
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSRLQ" form="ymm {z}, ymm, xmm" xed="VPSRLQ_YMMu64_MASKmskw_YMMu64_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_srli_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="unsigned int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shift packed 64-bit integers in "a" right by "imm8" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ IF imm8[7:0] &gt; 63
+ dst[i+63:i] := 0
+ ELSE
+ dst[i+63:i] := ZeroExtend64(a[i+63:i] &gt;&gt; imm8[7:0])
+ FI
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSRLQ" form="ymm {z}, ymm, imm8" xed="VPSRLQ_YMMu64_MASKmskw_YMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_srl_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m128i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <parameter type="__m128i" varname="count" etype="UI64"/>
+ <description>Shift packed 64-bit integers in "a" right by "count" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ IF count[63:0] &gt; 63
+ dst[i+63:i] := 0
+ ELSE
+ dst[i+63:i] := ZeroExtend64(a[i+63:i] &gt;&gt; count[63:0])
+ FI
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPSRLQ" form="xmm {k}, xmm, xmm" xed="VPSRLQ_XMMu64_MASKmskw_XMMu64_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_srli_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m128i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <parameter type="unsigned int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shift packed 64-bit integers in "a" right by "imm8" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ IF imm8[7:0] &gt; 63
+ dst[i+63:i] := 0
+ ELSE
+ dst[i+63:i] := ZeroExtend64(a[i+63:i] &gt;&gt; imm8[7:0])
+ FI
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPSRLQ" form="xmm {k}, xmm, imm8" xed="VPSRLQ_XMMu64_MASKmskw_XMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_srl_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <parameter type="__m128i" varname="count" etype="UI64"/>
+ <description>Shift packed 64-bit integers in "a" right by "count" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ IF count[63:0] &gt; 63
+ dst[i+63:i] := 0
+ ELSE
+ dst[i+63:i] := ZeroExtend64(a[i+63:i] &gt;&gt; count[63:0])
+ FI
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPSRLQ" form="xmm {z}, xmm, xmm" xed="VPSRLQ_XMMu64_MASKmskw_XMMu64_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_srli_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <parameter type="unsigned int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shift packed 64-bit integers in "a" right by "imm8" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ IF imm8[7:0] &gt; 63
+ dst[i+63:i] := 0
+ ELSE
+ dst[i+63:i] := ZeroExtend64(a[i+63:i] &gt;&gt; imm8[7:0])
+ FI
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPSRLQ" form="xmm {z}, xmm, imm8" xed="VPSRLQ_XMMu64_MASKmskw_XMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_srlv_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="__m256i" varname="count" etype="UI32"/>
+ <description>Shift packed 32-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ IF count[i+31:i] &lt; 32
+ dst[i+31:i] := ZeroExtend32(a[i+31:i] &gt;&gt; count[i+31:i])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSRLVD" form="ymm {k}, ymm, ymm" xed="VPSRLVD_YMMu32_MASKmskw_YMMu32_YMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_srlv_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="__m256i" varname="count" etype="UI32"/>
+ <description>Shift packed 32-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ IF count[i+31:i] &lt; 32
+ dst[i+31:i] := ZeroExtend32(a[i+31:i] &gt;&gt; count[i+31:i])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSRLVD" form="ymm {z}, ymm, ymm" xed="VPSRLVD_YMMu32_MASKmskw_YMMu32_YMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_srlv_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="count" etype="UI32"/>
+ <description>Shift packed 32-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ IF count[i+31:i] &lt; 32
+ dst[i+31:i] := ZeroExtend32(a[i+31:i] &gt;&gt; count[i+31:i])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPSRLVD" form="xmm {k}, xmm, xmm" xed="VPSRLVD_XMMu32_MASKmskw_XMMu32_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_srlv_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="count" etype="UI32"/>
+ <description>Shift packed 32-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ IF count[i+31:i] &lt; 32
+ dst[i+31:i] := ZeroExtend32(a[i+31:i] &gt;&gt; count[i+31:i])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPSRLVD" form="xmm {z}, xmm, xmm" xed="VPSRLVD_XMMu32_MASKmskw_XMMu32_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_srlv_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m256i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="__m256i" varname="count" etype="UI64"/>
+ <description>Shift packed 64-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ IF count[i+63:i] &lt; 64
+ dst[i+63:i] := ZeroExtend64(a[i+63:i] &gt;&gt; count[i+63:i])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSRLVQ" form="ymm {k}, ymm, ymm" xed="VPSRLVQ_YMMu64_MASKmskw_YMMu64_YMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_srlv_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="__m256i" varname="count" etype="UI64"/>
+ <description>Shift packed 64-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ IF count[i+63:i] &lt; 64
+ dst[i+63:i] := ZeroExtend64(a[i+63:i] &gt;&gt; count[i+63:i])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSRLVQ" form="ymm {z}, ymm, ymm" xed="VPSRLVQ_YMMu64_MASKmskw_YMMu64_YMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_srlv_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m128i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <parameter type="__m128i" varname="count" etype="UI64"/>
+ <description>Shift packed 64-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ IF count[i+63:i] &lt; 64
+ dst[i+63:i] := ZeroExtend64(a[i+63:i] &gt;&gt; count[i+63:i])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPSRLVQ" form="xmm {k}, xmm, xmm" xed="VPSRLVQ_XMMu64_MASKmskw_XMMu64_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_srlv_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <parameter type="__m128i" varname="count" etype="UI64"/>
+ <description>Shift packed 64-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ IF count[i+63:i] &lt; 64
+ dst[i+63:i] := ZeroExtend64(a[i+63:i] &gt;&gt; count[i+63:i])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPSRLVQ" form="xmm {z}, xmm, xmm" xed="VPSRLVQ_XMMu64_MASKmskw_XMMu64_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_sub_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="__m256i" varname="b" etype="UI32"/>
+ <description>Subtract packed 32-bit integers in "b" from packed 32-bit integers in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := a[i+31:i] - b[i+31:i]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSUBD" form="ymm {k}, ymm, ymm" xed="VPSUBD_YMMu32_MASKmskw_YMMu32_YMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_sub_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="__m256i" varname="b" etype="UI32"/>
+ <description>Subtract packed 32-bit integers in "b" from packed 32-bit integers in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := a[i+31:i] - b[i+31:i]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSUBD" form="ymm {z}, ymm, ymm" xed="VPSUBD_YMMu32_MASKmskw_YMMu32_YMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_sub_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="b" etype="UI32"/>
+ <description>Subtract packed 32-bit integers in "b" from packed 32-bit integers in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := a[i+31:i] - b[i+31:i]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPSUBD" form="xmm {k}, xmm, xmm" xed="VPSUBD_XMMu32_MASKmskw_XMMu32_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_sub_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="b" etype="UI32"/>
+ <description>Subtract packed 32-bit integers in "b" from packed 32-bit integers in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := a[i+31:i] - b[i+31:i]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPSUBD" form="xmm {z}, xmm, xmm" xed="VPSUBD_XMMu32_MASKmskw_XMMu32_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_sub_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m256i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="__m256i" varname="b" etype="UI64"/>
+ <description>Subtract packed 64-bit integers in "b" from packed 64-bit integers in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := a[i+63:i] - b[i+63:i]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSUBQ" form="ymm {k}, ymm, ymm" xed="VPSUBQ_YMMu64_MASKmskw_YMMu64_YMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_sub_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="__m256i" varname="b" etype="UI64"/>
+ <description>Subtract packed 64-bit integers in "b" from packed 64-bit integers in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := a[i+63:i] - b[i+63:i]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSUBQ" form="ymm {z}, ymm, ymm" xed="VPSUBQ_YMMu64_MASKmskw_YMMu64_YMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_sub_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m128i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <parameter type="__m128i" varname="b" etype="UI64"/>
+ <description>Subtract packed 64-bit integers in "b" from packed 64-bit integers in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := a[i+63:i] - b[i+63:i]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPSUBQ" form="xmm {k}, xmm, xmm" xed="VPSUBQ_XMMu64_MASKmskw_XMMu64_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_sub_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <parameter type="__m128i" varname="b" etype="UI64"/>
+ <description>Subtract packed 64-bit integers in "b" from packed 64-bit integers in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := a[i+63:i] - b[i+63:i]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPSUBQ" form="xmm {z}, xmm, xmm" xed="VPSUBQ_XMMu64_MASKmskw_XMMu64_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_ternarylogic_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Logical</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="__m256i" varname="b" etype="UI32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Bitwise ternary logic that provides the capability to implement any three-operand binary function; the specific binary function is specified by value in "imm8". For each bit in each packed 32-bit integer, the corresponding bit from "src", "a", and "b" are used to form a 3 bit index into "imm8", and the value at that bit in "imm8" is written to the corresponding bit in "dst" using writemask "k" at 32-bit granularity (32-bit elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ FOR h := 0 to 31
+ index[2:0] := (src[i+h] &lt;&lt; 2) OR (a[i+h] &lt;&lt; 1) OR b[i+h]
+ dst[i+h] := imm8[index[2:0]]
+ ENDFOR
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPTERNLOGD" form="ymm {k}, ymm, ymm, imm8" xed="VPTERNLOGD_YMMu32_MASKmskw_YMMu32_YMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_ternarylogic_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Logical</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="__m256i" varname="b" etype="UI32"/>
+ <parameter type="__m256i" varname="c" etype="UI32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Bitwise ternary logic that provides the capability to implement any three-operand binary function; the specific binary function is specified by value in "imm8". For each bit in each packed 32-bit integer, the corresponding bit from "a", "b", and "c" are used to form a 3 bit index into "imm8", and the value at that bit in "imm8" is written to the corresponding bit in "dst" using zeromask "k" at 32-bit granularity (32-bit elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ FOR h := 0 to 31
+ index[2:0] := (a[i+h] &lt;&lt; 2) OR (b[i+h] &lt;&lt; 1) OR c[i+h]
+ dst[i+h] := imm8[index[2:0]]
+ ENDFOR
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPTERNLOGD" form="ymm {z}, ymm, ymm, imm8" xed="VPTERNLOGD_YMMu32_MASKmskw_YMMu32_YMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_ternarylogic_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Logical</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="__m256i" varname="b" etype="UI32"/>
+ <parameter type="__m256i" varname="c" etype="UI32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Bitwise ternary logic that provides the capability to implement any three-operand binary function; the specific binary function is specified by value in "imm8". For each bit in each packed 32-bit integer, the corresponding bit from "a", "b", and "c" are used to form a 3 bit index into "imm8", and the value at that bit in "imm8" is written to the corresponding bit in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ FOR h := 0 to 31
+ index[2:0] := (a[i+h] &lt;&lt; 2) OR (b[i+h] &lt;&lt; 1) OR c[i+h]
+ dst[i+h] := imm8[index[2:0]]
+ ENDFOR
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPTERNLOGD" form="ymm, ymm, ymm, imm8" xed="VPTERNLOGD_YMMu32_MASKmskw_YMMu32_YMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_ternarylogic_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Logical</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="b" etype="UI32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Bitwise ternary logic that provides the capability to implement any three-operand binary function; the specific binary function is specified by value in "imm8". For each bit in each packed 32-bit integer, the corresponding bit from "src", "a", and "b" are used to form a 3 bit index into "imm8", and the value at that bit in "imm8" is written to the corresponding bit in "dst" using writemask "k" at 32-bit granularity (32-bit elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ FOR h := 0 to 31
+ index[2:0] := (src[i+h] &lt;&lt; 2) OR (a[i+h] &lt;&lt; 1) OR b[i+h]
+ dst[i+h] := imm8[index[2:0]]
+ ENDFOR
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPTERNLOGD" form="xmm {k}, xmm, xmm, imm8" xed="VPTERNLOGD_XMMu32_MASKmskw_XMMu32_XMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_ternarylogic_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Logical</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="b" etype="UI32"/>
+ <parameter type="__m128i" varname="c" etype="UI32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Bitwise ternary logic that provides the capability to implement any three-operand binary function; the specific binary function is specified by value in "imm8". For each bit in each packed 32-bit integer, the corresponding bit from "a", "b", and "c" are used to form a 3 bit index into "imm8", and the value at that bit in "imm8" is written to the corresponding bit in "dst" using zeromask "k" at 32-bit granularity (32-bit elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ FOR h := 0 to 31
+ index[2:0] := (a[i+h] &lt;&lt; 2) OR (b[i+h] &lt;&lt; 1) OR c[i+h]
+ dst[i+h] := imm8[index[2:0]]
+ ENDFOR
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPTERNLOGD" form="xmm {z}, xmm, xmm, imm8" xed="VPTERNLOGD_XMMu32_MASKmskw_XMMu32_XMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_ternarylogic_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Logical</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="b" etype="UI32"/>
+ <parameter type="__m128i" varname="c" etype="UI32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Bitwise ternary logic that provides the capability to implement any three-operand binary function; the specific binary function is specified by value in "imm8". For each bit in each packed 32-bit integer, the corresponding bit from "a", "b", and "c" are used to form a 3 bit index into "imm8", and the value at that bit in "imm8" is written to the corresponding bit in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ FOR h := 0 to 31
+ index[2:0] := (a[i+h] &lt;&lt; 2) OR (b[i+h] &lt;&lt; 1) OR c[i+h]
+ dst[i+h] := imm8[index[2:0]]
+ ENDFOR
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPTERNLOGD" form="xmm, xmm, xmm, imm8" xed="VPTERNLOGD_XMMu32_MASKmskw_XMMu32_XMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_ternarylogic_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Logical</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m256i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="__m256i" varname="b" etype="UI64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Bitwise ternary logic that provides the capability to implement any three-operand binary function; the specific binary function is specified by value in "imm8". For each bit in each packed 64-bit integer, the corresponding bit from "src", "a", and "b" are used to form a 3 bit index into "imm8", and the value at that bit in "imm8" is written to the corresponding bit in "dst" using writemask "k" at 64-bit granularity (64-bit elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ FOR h := 0 to 63
+ index[2:0] := (src[i+h] &lt;&lt; 2) OR (a[i+h] &lt;&lt; 1) OR b[i+h]
+ dst[i+h] := imm8[index[2:0]]
+ ENDFOR
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPTERNLOGQ" form="ymm {k}, ymm, ymm, imm8" xed="VPTERNLOGQ_YMMu64_MASKmskw_YMMu64_YMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_ternarylogic_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Logical</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="__m256i" varname="b" etype="UI64"/>
+ <parameter type="__m256i" varname="c" etype="UI64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Bitwise ternary logic that provides the capability to implement any three-operand binary function; the specific binary function is specified by value in "imm8". For each bit in each packed 64-bit integer, the corresponding bit from "a", "b", and "c" are used to form a 3 bit index into "imm8", and the value at that bit in "imm8" is written to the corresponding bit in "dst" using zeromask "k" at 64-bit granularity (64-bit elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ FOR h := 0 to 63
+ index[2:0] := (a[i+h] &lt;&lt; 2) OR (b[i+h] &lt;&lt; 1) OR c[i+h]
+ dst[i+h] := imm8[index[2:0]]
+ ENDFOR
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPTERNLOGQ" form="ymm {z}, ymm, ymm, imm8" xed="VPTERNLOGQ_YMMu64_MASKmskw_YMMu64_YMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_ternarylogic_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Logical</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="__m256i" varname="b" etype="UI64"/>
+ <parameter type="__m256i" varname="c" etype="UI64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Bitwise ternary logic that provides the capability to implement any three-operand binary function; the specific binary function is specified by value in "imm8". For each bit in each packed 64-bit integer, the corresponding bit from "a", "b", and "c" are used to form a 3 bit index into "imm8", and the value at that bit in "imm8" is written to the corresponding bit in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ FOR h := 0 to 63
+ index[2:0] := (a[i+h] &lt;&lt; 2) OR (b[i+h] &lt;&lt; 1) OR c[i+h]
+ dst[i+h] := imm8[index[2:0]]
+ ENDFOR
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPTERNLOGQ" form="ymm, ymm, ymm, imm8" xed="VPTERNLOGQ_YMMu64_MASKmskw_YMMu64_YMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_ternarylogic_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Logical</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m128i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <parameter type="__m128i" varname="b" etype="UI64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Bitwise ternary logic that provides the capability to implement any three-operand binary function; the specific binary function is specified by value in "imm8". For each bit in each packed 64-bit integer, the corresponding bit from "src", "a", and "b" are used to form a 3 bit index into "imm8", and the value at that bit in "imm8" is written to the corresponding bit in "dst" using writemask "k" at 64-bit granularity (64-bit elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ FOR h := 0 to 63
+ index[2:0] := (src[i+h] &lt;&lt; 2) OR (a[i+h] &lt;&lt; 1) OR b[i+h]
+ dst[i+h] := imm8[index[2:0]]
+ ENDFOR
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPTERNLOGQ" form="xmm {k}, xmm, xmm, imm8" xed="VPTERNLOGQ_XMMu64_MASKmskw_XMMu64_XMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_ternarylogic_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Logical</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <parameter type="__m128i" varname="b" etype="UI64"/>
+ <parameter type="__m128i" varname="c" etype="UI64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Bitwise ternary logic that provides the capability to implement any three-operand binary function; the specific binary function is specified by value in "imm8". For each bit in each packed 64-bit integer, the corresponding bit from "a", "b", and "c" are used to form a 3 bit index into "imm8", and the value at that bit in "imm8" is written to the corresponding bit in "dst" using zeromask "k" at 64-bit granularity (64-bit elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ FOR h := 0 to 63
+ index[2:0] := (a[i+h] &lt;&lt; 2) OR (b[i+h] &lt;&lt; 1) OR c[i+h]
+ dst[i+h] := imm8[index[2:0]]
+ ENDFOR
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPTERNLOGQ" form="xmm {z}, xmm, xmm, imm8" xed="VPTERNLOGQ_XMMu64_MASKmskw_XMMu64_XMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_ternarylogic_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Logical</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <parameter type="__m128i" varname="b" etype="UI64"/>
+ <parameter type="__m128i" varname="c" etype="UI64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Bitwise ternary logic that provides the capability to implement any three-operand binary function; the specific binary function is specified by value in "imm8". For each bit in each packed 64-bit integer, the corresponding bit from "a", "b", and "c" are used to form a 3 bit index into "imm8", and the value at that bit in "imm8" is written to the corresponding bit in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ FOR h := 0 to 63
+ index[2:0] := (a[i+h] &lt;&lt; 2) OR (b[i+h] &lt;&lt; 1) OR c[i+h]
+ dst[i+h] := imm8[index[2:0]]
+ ENDFOR
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPTERNLOGQ" form="xmm, xmm, xmm, imm8" xed="VPTERNLOGQ_XMMu64_MASKmskw_XMMu64_XMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_test_epi32_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__mmask8" varname="k1" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="__m256i" varname="b" etype="UI32"/>
+ <description>Compute the bitwise AND of packed 32-bit integers in "a" and "b", producing intermediate 32-bit values, and set the corresponding bit in result mask "k" (subject to writemask "k") if the intermediate value is non-zero.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k1[j]
+ k[j] := ((a[i+31:i] AND b[i+31:i]) != 0) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:8] := 0
+ </operation>
+ <instruction name="VPTESTMD" form="k {k}, ymm, ymm" xed="VPTESTMD_MASKmskw_MASKmskw_YMMu32_YMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_test_epi32_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="__m256i" varname="b" etype="UI32"/>
+ <description>Compute the bitwise AND of packed 32-bit integers in "a" and "b", producing intermediate 32-bit values, and set the corresponding bit in result mask "k" if the intermediate value is non-zero.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ k[j] := ((a[i+31:i] AND b[i+31:i]) != 0) ? 1 : 0
+ENDFOR
+k[MAX:8] := 0
+ </operation>
+ <instruction name="VPTESTMD" form="k, ymm, ymm" xed="VPTESTMD_MASKmskw_MASKmskw_YMMu32_YMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_test_epi32_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__mmask8" varname="k1" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="b" etype="UI32"/>
+ <description>Compute the bitwise AND of packed 32-bit integers in "a" and "b", producing intermediate 32-bit values, and set the corresponding bit in result mask "k" (subject to writemask "k") if the intermediate value is non-zero.</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k1[j]
+ k[j] := ((a[i+31:i] AND b[i+31:i]) != 0) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:4] := 0
+ </operation>
+ <instruction name="VPTESTMD" form="k {k}, xmm, xmm" xed="VPTESTMD_MASKmskw_MASKmskw_XMMu32_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_test_epi32_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="b" etype="UI32"/>
+ <description>Compute the bitwise AND of packed 32-bit integers in "a" and "b", producing intermediate 32-bit values, and set the corresponding bit in result mask "k" if the intermediate value is non-zero.</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ k[j] := ((a[i+31:i] AND b[i+31:i]) != 0) ? 1 : 0
+ENDFOR
+k[MAX:4] := 0
+ </operation>
+ <instruction name="VPTESTMD" form="k, xmm, xmm" xed="VPTESTMD_MASKmskw_MASKmskw_XMMu32_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_test_epi64_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__mmask8" varname="k1" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="__m256i" varname="b" etype="UI64"/>
+ <description>Compute the bitwise AND of packed 64-bit integers in "a" and "b", producing intermediate 64-bit values, and set the corresponding bit in result mask "k" (subject to writemask "k") if the intermediate value is non-zero.</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k1[j]
+ k[j] := ((a[i+63:i] AND b[i+63:i]) != 0) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:4] := 0
+ </operation>
+ <instruction name="VPTESTMQ" form="k {k}, ymm, ymm" xed="VPTESTMQ_MASKmskw_MASKmskw_YMMu64_YMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_test_epi64_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="__m256i" varname="b" etype="UI64"/>
+ <description>Compute the bitwise AND of packed 64-bit integers in "a" and "b", producing intermediate 64-bit values, and set the corresponding bit in result mask "k" if the intermediate value is non-zero.</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ k[j] := ((a[i+63:i] AND b[i+63:i]) != 0) ? 1 : 0
+ENDFOR
+k[MAX:4] := 0
+ </operation>
+ <instruction name="VPTESTMQ" form="k, ymm, ymm" xed="VPTESTMQ_MASKmskw_MASKmskw_YMMu64_YMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_test_epi64_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__mmask8" varname="k1" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <parameter type="__m128i" varname="b" etype="UI64"/>
+ <description>Compute the bitwise AND of packed 64-bit integers in "a" and "b", producing intermediate 64-bit values, and set the corresponding bit in result mask "k" (subject to writemask "k") if the intermediate value is non-zero.</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k1[j]
+ k[j] := ((a[i+63:i] AND b[i+63:i]) != 0) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:2] := 0
+ </operation>
+ <instruction name="VPTESTMQ" form="k {k}, xmm, xmm" xed="VPTESTMQ_MASKmskw_MASKmskw_XMMu64_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_test_epi64_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <parameter type="__m128i" varname="b" etype="UI64"/>
+ <description>Compute the bitwise AND of packed 64-bit integers in "a" and "b", producing intermediate 64-bit values, and set the corresponding bit in result mask "k" if the intermediate value is non-zero.</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ k[j] := ((a[i+63:i] AND b[i+63:i]) != 0) ? 1 : 0
+ENDFOR
+k[MAX:2] := 0
+ </operation>
+ <instruction name="VPTESTMQ" form="k, xmm, xmm" xed="VPTESTMQ_MASKmskw_MASKmskw_XMMu64_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_testn_epi32_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__mmask8" varname="k1" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="__m256i" varname="b" etype="UI32"/>
+ <description>Compute the bitwise NAND of packed 32-bit integers in "a" and "b", producing intermediate 32-bit values, and set the corresponding bit in result mask "k" (subject to writemask "k") if the intermediate value is zero.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k1[j]
+ k[j] := ((a[i+31:i] AND b[i+31:i]) == 0) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:8] := 0
+ </operation>
+ <instruction name="VPTESTNMD" form="k {k}, ymm, ymm" xed="VPTESTNMD_MASKmskw_MASKmskw_YMMu32_YMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_testn_epi32_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="__m256i" varname="b" etype="UI32"/>
+ <description>Compute the bitwise NAND of packed 32-bit integers in "a" and "b", producing intermediate 32-bit values, and set the corresponding bit in result mask "k" if the intermediate value is zero.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ k[j] := ((a[i+31:i] AND b[i+31:i]) == 0) ? 1 : 0
+ENDFOR
+k[MAX:8] := 0
+ </operation>
+ <instruction name="VPTESTNMD" form="k, ymm, ymm" xed="VPTESTNMD_MASKmskw_MASKmskw_YMMu32_YMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_testn_epi32_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__mmask8" varname="k1" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="b" etype="UI32"/>
+ <description>Compute the bitwise NAND of packed 32-bit integers in "a" and "b", producing intermediate 32-bit values, and set the corresponding bit in result mask "k" (subject to writemask "k") if the intermediate value is zero.</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k1[j]
+ k[j] := ((a[i+31:i] AND b[i+31:i]) == 0) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:4] := 0
+ </operation>
+ <instruction name="VPTESTNMD" form="k {k}, xmm, xmm" xed="VPTESTNMD_MASKmskw_MASKmskw_XMMu32_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_testn_epi32_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="b" etype="UI32"/>
+ <description>Compute the bitwise NAND of packed 32-bit integers in "a" and "b", producing intermediate 32-bit values, and set the corresponding bit in result mask "k" if the intermediate value is zero.</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ k[j] := ((a[i+31:i] AND b[i+31:i]) == 0) ? 1 : 0
+ENDFOR
+k[MAX:4] := 0
+ </operation>
+ <instruction name="VPTESTNMD" form="k, xmm, xmm" xed="VPTESTNMD_MASKmskw_MASKmskw_XMMu32_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_testn_epi64_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__mmask8" varname="k1" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="__m256i" varname="b" etype="UI64"/>
+ <description>Compute the bitwise NAND of packed 64-bit integers in "a" and "b", producing intermediate 64-bit values, and set the corresponding bit in result mask "k" (subject to writemask "k") if the intermediate value is zero.</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k1[j]
+ k[j] := ((a[i+63:i] AND b[i+63:i]) == 0) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:4] := 0
+ </operation>
+ <instruction name="VPTESTNMQ" form="k {k}, ymm, ymm" xed="VPTESTNMQ_MASKmskw_MASKmskw_YMMu64_YMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_testn_epi64_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="__m256i" varname="b" etype="UI64"/>
+ <description>Compute the bitwise NAND of packed 64-bit integers in "a" and "b", producing intermediate 64-bit values, and set the corresponding bit in result mask "k" if the intermediate value is zero.</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ k[j] := ((a[i+63:i] AND b[i+63:i]) == 0) ? 1 : 0
+ENDFOR
+k[MAX:4] := 0
+ </operation>
+ <instruction name="VPTESTNMQ" form="k, ymm, ymm" xed="VPTESTNMQ_MASKmskw_MASKmskw_YMMu64_YMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_testn_epi64_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__mmask8" varname="k1" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <parameter type="__m128i" varname="b" etype="UI64"/>
+ <description>Compute the bitwise NAND of packed 64-bit integers in "a" and "b", producing intermediate 64-bit values, and set the corresponding bit in result mask "k" (subject to writemask "k") if the intermediate value is zero.</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k1[j]
+ k[j] := ((a[i+63:i] AND b[i+63:i]) == 0) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:2] := 0
+ </operation>
+ <instruction name="VPTESTNMQ" form="k {k}, xmm, xmm" xed="VPTESTNMQ_MASKmskw_MASKmskw_XMMu64_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_testn_epi64_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <parameter type="__m128i" varname="b" etype="UI64"/>
+ <description>Compute the bitwise NAND of packed 64-bit integers in "a" and "b", producing intermediate 64-bit values, and set the corresponding bit in result mask "k" if the intermediate value is zero.</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ k[j] := ((a[i+63:i] AND b[i+63:i]) == 0) ? 1 : 0
+ENDFOR
+k[MAX:2] := 0
+ </operation>
+ <instruction name="VPTESTNMQ" form="k, xmm, xmm" xed="VPTESTNMQ_MASKmskw_MASKmskw_XMMu64_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_unpackhi_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="__m256i" varname="b" etype="UI32"/>
+ <description>Unpack and interleave 32-bit integers from the high half of each 128-bit lane in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE INTERLEAVE_HIGH_DWORDS(src1[127:0], src2[127:0]) {
+ dst[31:0] := src1[95:64]
+ dst[63:32] := src2[95:64]
+ dst[95:64] := src1[127:96]
+ dst[127:96] := src2[127:96]
+ RETURN dst[127:0]
+}
+tmp_dst[127:0] := INTERLEAVE_HIGH_DWORDS(a[127:0], b[127:0])
+tmp_dst[255:128] := INTERLEAVE_HIGH_DWORDS(a[255:128], b[255:128])
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := tmp_dst[i+31:i]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPUNPCKHDQ" form="ymm {k}, ymm, ymm" xed="VPUNPCKHDQ_YMMu32_MASKmskw_YMMu32_YMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_unpackhi_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="__m256i" varname="b" etype="UI32"/>
+ <description>Unpack and interleave 32-bit integers from the high half of each 128-bit lane in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE INTERLEAVE_HIGH_DWORDS(src1[127:0], src2[127:0]) {
+ dst[31:0] := src1[95:64]
+ dst[63:32] := src2[95:64]
+ dst[95:64] := src1[127:96]
+ dst[127:96] := src2[127:96]
+ RETURN dst[127:0]
+}
+tmp_dst[127:0] := INTERLEAVE_HIGH_DWORDS(a[127:0], b[127:0])
+tmp_dst[255:128] := INTERLEAVE_HIGH_DWORDS(a[255:128], b[255:128])
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := tmp_dst[i+31:i]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPUNPCKHDQ" form="ymm {z}, ymm, ymm" xed="VPUNPCKHDQ_YMMu32_MASKmskw_YMMu32_YMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_unpackhi_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="b" etype="UI32"/>
+ <description>Unpack and interleave 32-bit integers from the high half of "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE INTERLEAVE_HIGH_DWORDS(src1[127:0], src2[127:0]) {
+ dst[31:0] := src1[95:64]
+ dst[63:32] := src2[95:64]
+ dst[95:64] := src1[127:96]
+ dst[127:96] := src2[127:96]
+ RETURN dst[127:0]
+}
+tmp_dst[127:0] := INTERLEAVE_HIGH_DWORDS(a[127:0], b[127:0])
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := tmp_dst[i+31:i]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPUNPCKHDQ" form="xmm {k}, xmm, xmm" xed="VPUNPCKHDQ_XMMu32_MASKmskw_XMMu32_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_unpackhi_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="b" etype="UI32"/>
+ <description>Unpack and interleave 32-bit integers from the high half of "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE INTERLEAVE_HIGH_DWORDS(src1[127:0], src2[127:0]) {
+ dst[31:0] := src1[95:64]
+ dst[63:32] := src2[95:64]
+ dst[95:64] := src1[127:96]
+ dst[127:96] := src2[127:96]
+ RETURN dst[127:0]
+}
+tmp_dst[127:0] := INTERLEAVE_HIGH_DWORDS(a[127:0], b[127:0])
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := tmp_dst[i+31:i]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPUNPCKHDQ" form="xmm {z}, xmm, xmm" xed="VPUNPCKHDQ_XMMu32_MASKmskw_XMMu32_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_unpackhi_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m256i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="__m256i" varname="b" etype="UI64"/>
+ <description>Unpack and interleave 64-bit integers from the high half of each 128-bit lane in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE INTERLEAVE_HIGH_QWORDS(src1[127:0], src2[127:0]) {
+ dst[63:0] := src1[127:64]
+ dst[127:64] := src2[127:64]
+ RETURN dst[127:0]
+}
+tmp_dst[127:0] := INTERLEAVE_HIGH_QWORDS(a[127:0], b[127:0])
+tmp_dst[255:128] := INTERLEAVE_HIGH_QWORDS(a[255:128], b[255:128])
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := tmp_dst[i+63:i]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPUNPCKHQDQ" form="ymm {k}, ymm, ymm" xed="VPUNPCKHQDQ_YMMu64_MASKmskw_YMMu64_YMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_unpackhi_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="__m256i" varname="b" etype="UI64"/>
+ <description>Unpack and interleave 64-bit integers from the high half of each 128-bit lane in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE INTERLEAVE_HIGH_QWORDS(src1[127:0], src2[127:0]) {
+ dst[63:0] := src1[127:64]
+ dst[127:64] := src2[127:64]
+ RETURN dst[127:0]
+}
+tmp_dst[127:0] := INTERLEAVE_HIGH_QWORDS(a[127:0], b[127:0])
+tmp_dst[255:128] := INTERLEAVE_HIGH_QWORDS(a[255:128], b[255:128])
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := tmp_dst[i+63:i]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPUNPCKHQDQ" form="ymm {z}, ymm, ymm" xed="VPUNPCKHQDQ_YMMu64_MASKmskw_YMMu64_YMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_unpackhi_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m128i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <parameter type="__m128i" varname="b" etype="UI64"/>
+ <description>Unpack and interleave 64-bit integers from the high half of "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE INTERLEAVE_HIGH_QWORDS(src1[127:0], src2[127:0]) {
+ dst[63:0] := src1[127:64]
+ dst[127:64] := src2[127:64]
+ RETURN dst[127:0]
+}
+tmp_dst[127:0] := INTERLEAVE_HIGH_QWORDS(a[127:0], b[127:0])
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := tmp_dst[i+63:i]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPUNPCKHQDQ" form="xmm {k}, xmm, xmm" xed="VPUNPCKHQDQ_XMMu64_MASKmskw_XMMu64_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_unpackhi_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <parameter type="__m128i" varname="b" etype="UI64"/>
+ <description>Unpack and interleave 64-bit integers from the high half of "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE INTERLEAVE_HIGH_QWORDS(src1[127:0], src2[127:0]) {
+ dst[63:0] := src1[127:64]
+ dst[127:64] := src2[127:64]
+ RETURN dst[127:0]
+}
+tmp_dst[127:0] := INTERLEAVE_HIGH_QWORDS(a[127:0], b[127:0])
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := tmp_dst[i+63:i]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPUNPCKHQDQ" form="xmm {z}, xmm, xmm" xed="VPUNPCKHQDQ_XMMu64_MASKmskw_XMMu64_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_unpacklo_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="__m256i" varname="b" etype="UI32"/>
+ <description>Unpack and interleave 32-bit integers from the low half of each 128-bit lane in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE INTERLEAVE_DWORDS(src1[127:0], src2[127:0]) {
+ dst[31:0] := src1[31:0]
+ dst[63:32] := src2[31:0]
+ dst[95:64] := src1[63:32]
+ dst[127:96] := src2[63:32]
+ RETURN dst[127:0]
+}
+tmp_dst[127:0] := INTERLEAVE_DWORDS(a[127:0], b[127:0])
+tmp_dst[255:128] := INTERLEAVE_DWORDS(a[255:128], b[255:128])
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := tmp_dst[i+31:i]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPUNPCKLDQ" form="ymm {k}, ymm, ymm" xed="VPUNPCKLDQ_YMMu32_MASKmskw_YMMu32_YMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_unpacklo_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="__m256i" varname="b" etype="UI32"/>
+ <description>Unpack and interleave 32-bit integers from the low half of each 128-bit lane in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE INTERLEAVE_DWORDS(src1[127:0], src2[127:0]) {
+ dst[31:0] := src1[31:0]
+ dst[63:32] := src2[31:0]
+ dst[95:64] := src1[63:32]
+ dst[127:96] := src2[63:32]
+ RETURN dst[127:0]
+}
+tmp_dst[127:0] := INTERLEAVE_DWORDS(a[127:0], b[127:0])
+tmp_dst[255:128] := INTERLEAVE_DWORDS(a[255:128], b[255:128])
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := tmp_dst[i+31:i]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPUNPCKLDQ" form="ymm {z}, ymm, ymm" xed="VPUNPCKLDQ_YMMu32_MASKmskw_YMMu32_YMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_unpacklo_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="b" etype="UI32"/>
+ <description>Unpack and interleave 32-bit integers from the low half of "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE INTERLEAVE_DWORDS(src1[127:0], src2[127:0]) {
+ dst[31:0] := src1[31:0]
+ dst[63:32] := src2[31:0]
+ dst[95:64] := src1[63:32]
+ dst[127:96] := src2[63:32]
+ RETURN dst[127:0]
+}
+tmp_dst[127:0] := INTERLEAVE_DWORDS(a[127:0], b[127:0])
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := tmp_dst[i+31:i]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPUNPCKLDQ" form="xmm {k}, xmm, xmm" xed="VPUNPCKLDQ_XMMu32_MASKmskw_XMMu32_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_unpacklo_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="b" etype="UI32"/>
+ <description>Unpack and interleave 32-bit integers from the low half of "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE INTERLEAVE_DWORDS(src1[127:0], src2[127:0]) {
+ dst[31:0] := src1[31:0]
+ dst[63:32] := src2[31:0]
+ dst[95:64] := src1[63:32]
+ dst[127:96] := src2[63:32]
+ RETURN dst[127:0]
+}
+tmp_dst[127:0] := INTERLEAVE_DWORDS(a[127:0], b[127:0])
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := tmp_dst[i+31:i]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPUNPCKLDQ" form="xmm {z}, xmm, xmm" xed="VPUNPCKLDQ_XMMu32_MASKmskw_XMMu32_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_unpacklo_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m256i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="__m256i" varname="b" etype="UI64"/>
+ <description>Unpack and interleave 64-bit integers from the low half of each 128-bit lane in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE INTERLEAVE_QWORDS(src1[127:0], src2[127:0]) {
+ dst[63:0] := src1[63:0]
+ dst[127:64] := src2[63:0]
+ RETURN dst[127:0]
+}
+tmp_dst[127:0] := INTERLEAVE_QWORDS(a[127:0], b[127:0])
+tmp_dst[255:128] := INTERLEAVE_QWORDS(a[255:128], b[255:128])
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := tmp_dst[i+63:i]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPUNPCKLQDQ" form="ymm {k}, ymm, ymm" xed="VPUNPCKLQDQ_YMMu64_MASKmskw_YMMu64_YMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_unpacklo_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="__m256i" varname="b" etype="UI64"/>
+ <description>Unpack and interleave 64-bit integers from the low half of each 128-bit lane in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE INTERLEAVE_QWORDS(src1[127:0], src2[127:0]) {
+ dst[63:0] := src1[63:0]
+ dst[127:64] := src2[63:0]
+ RETURN dst[127:0]
+}
+tmp_dst[127:0] := INTERLEAVE_QWORDS(a[127:0], b[127:0])
+tmp_dst[255:128] := INTERLEAVE_QWORDS(a[255:128], b[255:128])
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := tmp_dst[i+63:i]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPUNPCKLQDQ" form="ymm {z}, ymm, ymm" xed="VPUNPCKLQDQ_YMMu64_MASKmskw_YMMu64_YMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_unpacklo_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m128i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <parameter type="__m128i" varname="b" etype="UI64"/>
+ <description>Unpack and interleave 64-bit integers from the low half of "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE INTERLEAVE_QWORDS(src1[127:0], src2[127:0]) {
+ dst[63:0] := src1[63:0]
+ dst[127:64] := src2[63:0]
+ RETURN dst[127:0]
+}
+tmp_dst[127:0] := INTERLEAVE_QWORDS(a[127:0], b[127:0])
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := tmp_dst[i+63:i]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPUNPCKLQDQ" form="xmm {k}, xmm, xmm" xed="VPUNPCKLQDQ_XMMu64_MASKmskw_XMMu64_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_unpacklo_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <parameter type="__m128i" varname="b" etype="UI64"/>
+ <description>Unpack and interleave 64-bit integers from the low half of "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE INTERLEAVE_QWORDS(src1[127:0], src2[127:0]) {
+ dst[63:0] := src1[63:0]
+ dst[127:64] := src2[63:0]
+ RETURN dst[127:0]
+}
+tmp_dst[127:0] := INTERLEAVE_QWORDS(a[127:0], b[127:0])
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := tmp_dst[i+63:i]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPUNPCKLQDQ" form="xmm {z}, xmm, xmm" xed="VPUNPCKLQDQ_XMMu64_MASKmskw_XMMu64_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_xor_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Logical</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="__m256i" varname="b" etype="UI32"/>
+ <description>Compute the bitwise XOR of packed 32-bit integers in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := a[i+31:i] XOR b[i+31:i]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPXORD" form="ymm {k}, ymm, ymm" xed="VPXORD_YMMu32_MASKmskw_YMMu32_YMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_xor_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Logical</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="__m256i" varname="b" etype="UI32"/>
+ <description>Compute the bitwise XOR of packed 32-bit integers in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := a[i+31:i] XOR b[i+31:i]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPXORD" form="ymm {z}, ymm, ymm" xed="VPXORD_YMMu32_MASKmskw_YMMu32_YMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_xor_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Logical</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="b" etype="UI32"/>
+ <description>Compute the bitwise XOR of packed 32-bit integers in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := a[i+31:i] XOR b[i+31:i]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPXORD" form="xmm {k}, xmm, xmm" xed="VPXORD_XMMu32_MASKmskw_XMMu32_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_xor_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Logical</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="b" etype="UI32"/>
+ <description>Compute the bitwise XOR of packed 32-bit integers in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := a[i+31:i] XOR b[i+31:i]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPXORD" form="xmm {z}, xmm, xmm" xed="VPXORD_XMMu32_MASKmskw_XMMu32_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_xor_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Logical</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m256i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="__m256i" varname="b" etype="UI64"/>
+ <description>Compute the bitwise XOR of packed 64-bit integers in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := a[i+63:i] XOR b[i+63:i]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPXORQ" form="ymm {k}, ymm, ymm" xed="VPXORQ_YMMu64_MASKmskw_YMMu64_YMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_xor_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Logical</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="__m256i" varname="b" etype="UI64"/>
+ <description>Compute the bitwise XOR of packed 64-bit integers in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := a[i+63:i] XOR b[i+63:i]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPXORQ" form="ymm {z}, ymm, ymm" xed="VPXORQ_YMMu64_MASKmskw_YMMu64_YMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_xor_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Logical</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m128i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <parameter type="__m128i" varname="b" etype="UI64"/>
+ <description>Compute the bitwise XOR of packed 64-bit integers in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := a[i+63:i] XOR b[i+63:i]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPXORQ" form="xmm {k}, xmm, xmm" xed="VPXORQ_XMMu64_MASKmskw_XMMu64_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_xor_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Logical</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <parameter type="__m128i" varname="b" etype="UI64"/>
+ <description>Compute the bitwise XOR of packed 64-bit integers in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := a[i+63:i] XOR b[i+63:i]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPXORQ" form="xmm {z}, xmm, xmm" xed="VPXORQ_XMMu64_MASKmskw_XMMu64_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_rcp14_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <description>Compute the approximate reciprocal of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-14.</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := (1.0 / a[i+63:i])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VRCP14PD" form="ymm {k}, ymm" xed="VRCP14PD_YMMf64_MASKmskw_YMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_rcp14_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <description>Compute the approximate reciprocal of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-14.</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := (1.0 / a[i+63:i])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VRCP14PD" form="ymm {z}, ymm" xed="VRCP14PD_YMMf64_MASKmskw_YMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_rcp14_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <description>Compute the approximate reciprocal of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst". The maximum relative error for this approximation is less than 2^-14.</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ dst[i+63:i] := (1.0 / a[i+63:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VRCP14PD" form="ymm, ymm" xed="VRCP14PD_YMMf64_MASKmskw_YMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_rcp14_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Compute the approximate reciprocal of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-14.</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := (1.0 / a[i+63:i])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VRCP14PD" form="xmm {k}, xmm" xed="VRCP14PD_XMMf64_MASKmskw_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_rcp14_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Compute the approximate reciprocal of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-14.</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := (1.0 / a[i+63:i])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VRCP14PD" form="xmm {z}, xmm" xed="VRCP14PD_XMMf64_MASKmskw_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_rcp14_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Compute the approximate reciprocal of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst". The maximum relative error for this approximation is less than 2^-14.</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ dst[i+63:i] := (1.0 / a[i+63:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VRCP14PD" form="xmm, xmm" xed="VRCP14PD_XMMf64_MASKmskw_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_rcp14_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <description>Compute the approximate reciprocal of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-14.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := (1.0 / a[i+31:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VRCP14PS" form="ymm {k}, ymm" xed="VRCP14PS_YMMf32_MASKmskw_YMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_rcp14_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <description>Compute the approximate reciprocal of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-14.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := (1.0 / a[i+31:i])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VRCP14PS" form="ymm {z}, ymm" xed="VRCP14PS_YMMf32_MASKmskw_YMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_rcp14_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <description>Compute the approximate reciprocal of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". The maximum relative error for this approximation is less than 2^-14.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ dst[i+31:i] := (1.0 / a[i+31:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VRCP14PS" form="ymm, ymm" xed="VRCP14PS_YMMf32_MASKmskw_YMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_rcp14_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Compute the approximate reciprocal of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-14.</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := (1.0 / a[i+31:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VRCP14PS" form="xmm {k}, xmm" xed="VRCP14PS_XMMf32_MASKmskw_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_rcp14_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Compute the approximate reciprocal of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-14.</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := (1.0 / a[i+31:i])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VRCP14PS" form="xmm {z}, xmm" xed="VRCP14PS_XMMf32_MASKmskw_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_rcp14_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Compute the approximate reciprocal of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". The maximum relative error for this approximation is less than 2^-14.</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ dst[i+31:i] := (1.0 / a[i+31:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VRCP14PS" form="xmm, xmm" xed="VRCP14PS_XMMf32_MASKmskw_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_roundscale_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immtype="_MM_REDUCE"/>
+ <description>Round packed double-precision (64-bit) floating-point elements in "a" to the number of fraction bits specified by "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). [round_imm_note]</description>
+ <operation>
+DEFINE RoundScaleFP64(src1[63:0], imm8[7:0]) {
+ m[63:0] := FP64(imm8[7:4]) // number of fraction bits after the binary point to be preserved
+ tmp[63:0] := POW(2.0, -m) * ROUND(POW(2.0, m) * src1[63:0], imm8[3:0])
+ IF IsInf(tmp[63:0])
+ tmp[63:0] := src1[63:0]
+ FI
+ RETURN tmp[63:0]
+}
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := RoundScaleFP64(a[i+63:i], imm8[7:0])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VRNDSCALEPD" form="ymm {k}, ymm, imm8" xed="VRNDSCALEPD_YMMf64_MASKmskw_YMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_roundscale_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immtype="_MM_REDUCE"/>
+ <description>Round packed double-precision (64-bit) floating-point elements in "a" to the number of fraction bits specified by "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). [round_imm_note]</description>
+ <operation>
+DEFINE RoundScaleFP64(src1[63:0], imm8[7:0]) {
+ m[63:0] := FP64(imm8[7:4]) // number of fraction bits after the binary point to be preserved
+ tmp[63:0] := POW(2.0, -m) * ROUND(POW(2.0, m) * src1[63:0], imm8[3:0])
+ IF IsInf(tmp[63:0])
+ tmp[63:0] := src1[63:0]
+ FI
+ RETURN tmp[63:0]
+}
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := RoundScaleFP64(a[i+63:i], imm8[7:0])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VRNDSCALEPD" form="ymm {z}, ymm, imm8" xed="VRNDSCALEPD_YMMf64_MASKmskw_YMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_roundscale_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immtype="_MM_REDUCE"/>
+ <description>Round packed double-precision (64-bit) floating-point elements in "a" to the number of fraction bits specified by "imm8", and store the results in "dst". [round_imm_note]</description>
+ <operation>
+DEFINE RoundScaleFP64(src1[63:0], imm8[7:0]) {
+ m[63:0] := FP64(imm8[7:4]) // number of fraction bits after the binary point to be preserved
+ tmp[63:0] := POW(2.0, -m) * ROUND(POW(2.0, m) * src1[63:0], imm8[3:0])
+ IF IsInf(tmp[63:0])
+ tmp[63:0] := src1[63:0]
+ FI
+ RETURN tmp[63:0]
+}
+FOR j := 0 to 3
+ i := j*64
+ dst[i+63:i] := RoundScaleFP64(a[i+63:i], imm8[7:0])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VRNDSCALEPD" form="ymm, ymm, imm8" xed="VRNDSCALEPD_YMMf64_MASKmskw_YMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_roundscale_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immtype="_MM_REDUCE"/>
+ <description>Round packed double-precision (64-bit) floating-point elements in "a" to the number of fraction bits specified by "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). [round_imm_note]</description>
+ <operation>
+DEFINE RoundScaleFP64(src1[63:0], imm8[7:0]) {
+ m[63:0] := FP64(imm8[7:4]) // number of fraction bits after the binary point to be preserved
+ tmp[63:0] := POW(2.0, -m) * ROUND(POW(2.0, m) * src1[63:0], imm8[3:0])
+ IF IsInf(tmp[63:0])
+ tmp[63:0] := src1[63:0]
+ FI
+ RETURN tmp[63:0]
+}
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := RoundScaleFP64(a[i+63:i], imm8[7:0])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VRNDSCALEPD" form="xmm {k}, xmm, imm8" xed="VRNDSCALEPD_XMMf64_MASKmskw_XMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_roundscale_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immtype="_MM_REDUCE"/>
+ <description>Round packed double-precision (64-bit) floating-point elements in "a" to the number of fraction bits specified by "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). [round_imm_note]</description>
+ <operation>
+DEFINE RoundScaleFP64(src1[63:0], imm8[7:0]) {
+ m[63:0] := FP64(imm8[7:4]) // number of fraction bits after the binary point to be preserved
+ tmp[63:0] := POW(2.0, -m) * ROUND(POW(2.0, m) * src1[63:0], imm8[3:0])
+ IF IsInf(tmp[63:0])
+ tmp[63:0] := src1[63:0]
+ FI
+ RETURN tmp[63:0]
+}
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := RoundScaleFP64(a[i+63:i], imm8[7:0])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VRNDSCALEPD" form="xmm {z}, xmm, imm8" xed="VRNDSCALEPD_XMMf64_MASKmskw_XMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_roundscale_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immtype="_MM_REDUCE"/>
+ <description>Round packed double-precision (64-bit) floating-point elements in "a" to the number of fraction bits specified by "imm8", and store the results in "dst". [round_imm_note]</description>
+ <operation>
+DEFINE RoundScaleFP64(src1[63:0], imm8[7:0]) {
+ m[63:0] := FP64(imm8[7:4]) // number of fraction bits after the binary point to be preserved
+ tmp[63:0] := POW(2.0, -m) * ROUND(POW(2.0, m) * src1[63:0], imm8[3:0])
+ IF IsInf(tmp[63:0])
+ tmp[63:0] := src1[63:0]
+ FI
+ RETURN tmp[63:0]
+}
+FOR j := 0 to 1
+ i := j*64
+ dst[i+63:i] := RoundScaleFP64(a[i+63:i], imm8[7:0])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VRNDSCALEPD" form="xmm, xmm, imm8" xed="VRNDSCALEPD_XMMf64_MASKmskw_XMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_roundscale_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immtype="_MM_REDUCE"/>
+ <description>Round packed single-precision (32-bit) floating-point elements in "a" to the number of fraction bits specified by "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). [round_imm_note]</description>
+ <operation>
+DEFINE RoundScaleFP32(src1[31:0], imm8[7:0]) {
+ m[31:0] := FP32(imm8[7:4]) // number of fraction bits after the binary point to be preserved
+ tmp[31:0] := POW(FP32(2.0), -m) * ROUND(POW(FP32(2.0), m) * src1[31:0], imm8[3:0])
+ IF IsInf(tmp[31:0])
+ tmp[31:0] := src1[31:0]
+ FI
+ RETURN tmp[31:0]
+}
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := RoundScaleFP32(a[i+31:i], imm8[7:0])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VRNDSCALEPS" form="ymm {k}, ymm, imm8" xed="VRNDSCALEPS_YMMf32_MASKmskw_YMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_roundscale_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immtype="_MM_REDUCE"/>
+ <description>Round packed single-precision (32-bit) floating-point elements in "a" to the number of fraction bits specified by "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). [round_imm_note]</description>
+ <operation>
+DEFINE RoundScaleFP32(src1[31:0], imm8[7:0]) {
+ m[31:0] := FP32(imm8[7:4]) // number of fraction bits after the binary point to be preserved
+ tmp[31:0] := POW(FP32(2.0), -m) * ROUND(POW(FP32(2.0), m) * src1[31:0], imm8[3:0])
+ IF IsInf(tmp[31:0])
+ tmp[31:0] := src1[31:0]
+ FI
+ RETURN tmp[31:0]
+}
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := RoundScaleFP32(a[i+31:i], imm8[7:0])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VRNDSCALEPS" form="ymm {z}, ymm, imm8" xed="VRNDSCALEPS_YMMf32_MASKmskw_YMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_roundscale_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immtype="_MM_REDUCE"/>
+ <description>Round packed single-precision (32-bit) floating-point elements in "a" to the number of fraction bits specified by "imm8", and store the results in "dst". [round_imm_note]</description>
+ <operation>
+DEFINE RoundScaleFP32(src1[31:0], imm8[7:0]) {
+ m[31:0] := FP32(imm8[7:4]) // number of fraction bits after the binary point to be preserved
+ tmp[31:0] := POW(FP32(2.0), -m) * ROUND(POW(FP32(2.0), m) * src1[31:0], imm8[3:0])
+ IF IsInf(tmp[31:0])
+ tmp[31:0] := src1[31:0]
+ FI
+ RETURN tmp[31:0]
+}
+FOR j := 0 to 7
+ i := j*32
+ dst[i+31:i] := RoundScaleFP32(a[i+31:i], imm8[7:0])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VRNDSCALEPS" form="ymm, ymm, imm8" xed="VRNDSCALEPS_YMMf32_MASKmskw_YMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_roundscale_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immtype="_MM_REDUCE"/>
+ <description>Round packed single-precision (32-bit) floating-point elements in "a" to the number of fraction bits specified by "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). [round_imm_note]</description>
+ <operation>
+DEFINE RoundScaleFP32(src1[31:0], imm8[7:0]) {
+ m[31:0] := FP32(imm8[7:4]) // number of fraction bits after the binary point to be preserved
+ tmp[31:0] := POW(FP32(2.0), -m) * ROUND(POW(FP32(2.0), m) * src1[31:0], imm8[3:0])
+ IF IsInf(tmp[31:0])
+ tmp[31:0] := src1[31:0]
+ FI
+ RETURN tmp[31:0]
+}
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := RoundScaleFP32(a[i+31:i], imm8[7:0])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VRNDSCALEPS" form="xmm {k}, xmm, imm8" xed="VRNDSCALEPS_XMMf32_MASKmskw_XMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_roundscale_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immtype="_MM_REDUCE"/>
+ <description>Round packed single-precision (32-bit) floating-point elements in "a" to the number of fraction bits specified by "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). [round_imm_note]</description>
+ <operation>
+DEFINE RoundScaleFP32(src1[31:0], imm8[7:0]) {
+ m[31:0] := FP32(imm8[7:4]) // number of fraction bits after the binary point to be preserved
+ tmp[31:0] := POW(FP32(2.0), -m) * ROUND(POW(FP32(2.0), m) * src1[31:0], imm8[3:0])
+ IF IsInf(tmp[31:0])
+ tmp[31:0] := src1[31:0]
+ FI
+ RETURN tmp[31:0]
+}
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := RoundScaleFP32(a[i+31:i], imm8[7:0])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VRNDSCALEPS" form="xmm {z}, xmm, imm8" xed="VRNDSCALEPS_XMMf32_MASKmskw_XMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_roundscale_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immtype="_MM_REDUCE"/>
+ <description>Round packed single-precision (32-bit) floating-point elements in "a" to the number of fraction bits specified by "imm8", and store the results in "dst". [round_imm_note]</description>
+ <operation>
+DEFINE RoundScaleFP32(src1[31:0], imm8[7:0]) {
+ m[31:0] := FP32(imm8[7:4]) // number of fraction bits after the binary point to be preserved
+ tmp[31:0] := POW(FP32(2.0), -m) * ROUND(POW(FP32(2.0), m) * src1[31:0], imm8[3:0])
+ IF IsInf(tmp[31:0])
+ tmp[31:0] := src1[31:0]
+ FI
+ RETURN tmp[31:0]
+}
+FOR j := 0 to 3
+ i := j*32
+ dst[i+31:i] := RoundScaleFP32(a[i+31:i], imm8[7:0])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VRNDSCALEPS" form="xmm, xmm, imm8" xed="VRNDSCALEPS_XMMf32_MASKmskw_XMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_rsqrt14_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <description>Compute the approximate reciprocal square root of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-14.</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := (1.0 / SQRT(a[i+63:i]))
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VRSQRT14PD" form="ymm {k}, ymm" xed="VRSQRT14PD_YMMf64_MASKmskw_YMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_rsqrt14_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <description>Compute the approximate reciprocal square root of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-14.</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := (1.0 / SQRT(a[i+63:i]))
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VRSQRT14PD" form="ymm {z}, ymm" xed="VRSQRT14PD_YMMf64_MASKmskw_YMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_rsqrt14_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Compute the approximate reciprocal square root of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-14.</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := (1.0 / SQRT(a[i+63:i]))
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VRSQRT14PD" form="xmm {k}, xmm" xed="VRSQRT14PD_XMMf64_MASKmskw_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_rsqrt14_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Compute the approximate reciprocal square root of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-14.</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := (1.0 / SQRT(a[i+63:i]))
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VRSQRT14PD" form="xmm {z}, xmm" xed="VRSQRT14PD_XMMf64_MASKmskw_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_rsqrt14_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <description>Compute the approximate reciprocal square root of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-14.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := (1.0 / SQRT(a[i+31:i]))
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VRSQRT14PS" form="ymm {k}, ymm" xed="VRSQRT14PS_YMMf32_MASKmskw_YMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_rsqrt14_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <description>Compute the approximate reciprocal square root of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-14.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := (1.0 / SQRT(a[i+31:i]))
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VRSQRT14PS" form="ymm {z}, ymm" xed="VRSQRT14PS_YMMf32_MASKmskw_YMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_rsqrt14_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Compute the approximate reciprocal square root of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-14.</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := (1.0 / SQRT(a[i+31:i]))
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VRSQRT14PS" form="xmm {k}, xmm" xed="VRSQRT14PS_XMMf32_MASKmskw_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_rsqrt14_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Compute the approximate reciprocal square root of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-14.</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := (1.0 / SQRT(a[i+31:i]))
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VRSQRT14PS" form="xmm {z}, xmm" xed="VRSQRT14PS_XMMf32_MASKmskw_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_scalef_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <parameter type="__m256d" varname="b" etype="FP64"/>
+ <description>Scale the packed double-precision (64-bit) floating-point elements in "a" using values from "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>DEFINE SCALE(src1, src2) {
+ IF (src2 == NaN)
+ IF (src2 == SNaN)
+ RETURN QNAN(src2)
+ FI
+ ELSE IF (src1 == NaN)
+ IF (src1 == SNaN)
+ RETURN QNAN(src1)
+ FI
+ IF (src2 != INF)
+ RETURN QNAN(src1)
+ FI
+ ELSE
+ tmp_src2 := src2
+ tmp_src1 := src1
+ IF (IS_DENORMAL(src2) AND MXCSR.DAZ)
+ tmp_src2 := 0
+ FI
+ IF (IS_DENORMAL(src1) AND MXCSR.DAZ)
+ tmp_src1 := 0
+ FI
+ FI
+ dst[63:0] := tmp_src1[63:0] * POW(2.0, FLOOR(tmp_src2[63:0]))
+ RETURN dst[63:0]
+}
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+    dst[i+63:i] := SCALE(a[i+63:i], b[i+63:i])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VSCALEFPD" form="ymm {k}, ymm, ymm" xed="VSCALEFPD_YMMf64_MASKmskw_YMMf64_YMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_scalef_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <parameter type="__m256d" varname="b" etype="FP64"/>
+ <description>Scale the packed double-precision (64-bit) floating-point elements in "a" using values from "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>DEFINE SCALE(src1, src2) {
+ IF (src2 == NaN)
+ IF (src2 == SNaN)
+ RETURN QNAN(src2)
+ FI
+ ELSE IF (src1 == NaN)
+ IF (src1 == SNaN)
+ RETURN QNAN(src1)
+ FI
+ IF (src2 != INF)
+ RETURN QNAN(src1)
+ FI
+ ELSE
+ tmp_src2 := src2
+ tmp_src1 := src1
+ IF (IS_DENORMAL(src2) AND MXCSR.DAZ)
+ tmp_src2 := 0
+ FI
+ IF (IS_DENORMAL(src1) AND MXCSR.DAZ)
+ tmp_src1 := 0
+ FI
+ FI
+ dst[63:0] := tmp_src1[63:0] * POW(2.0, FLOOR(tmp_src2[63:0]))
+ RETURN dst[63:0]
+}
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+    dst[i+63:i] := SCALE(a[i+63:i], b[i+63:i])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VSCALEFPD" form="ymm {z}, ymm, ymm" xed="VSCALEFPD_YMMf64_MASKmskw_YMMf64_YMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_scalef_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <parameter type="__m256d" varname="b" etype="FP64"/>
+ <description>Scale the packed double-precision (64-bit) floating-point elements in "a" using values from "b", and store the results in "dst".</description>
+ <operation>DEFINE SCALE(src1, src2) {
+ IF (src2 == NaN)
+ IF (src2 == SNaN)
+ RETURN QNAN(src2)
+ FI
+ ELSE IF (src1 == NaN)
+ IF (src1 == SNaN)
+ RETURN QNAN(src1)
+ FI
+ IF (src2 != INF)
+ RETURN QNAN(src1)
+ FI
+ ELSE
+ tmp_src2 := src2
+ tmp_src1 := src1
+ IF (IS_DENORMAL(src2) AND MXCSR.DAZ)
+ tmp_src2 := 0
+ FI
+ IF (IS_DENORMAL(src1) AND MXCSR.DAZ)
+ tmp_src1 := 0
+ FI
+ FI
+ dst[63:0] := tmp_src1[63:0] * POW(2.0, FLOOR(tmp_src2[63:0]))
+ RETURN dst[63:0]
+}
+FOR j := 0 to 3
+ i := j*64
+  dst[i+63:i] := SCALE(a[i+63:i], b[i+63:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VSCALEFPD" form="ymm, ymm, ymm" xed="VSCALEFPD_YMMf64_MASKmskw_YMMf64_YMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_scalef_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Scale the packed double-precision (64-bit) floating-point elements in "a" using values from "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>DEFINE SCALE(src1, src2) {
+ IF (src2 == NaN)
+ IF (src2 == SNaN)
+ RETURN QNAN(src2)
+ FI
+ ELSE IF (src1 == NaN)
+ IF (src1 == SNaN)
+ RETURN QNAN(src1)
+ FI
+ IF (src2 != INF)
+ RETURN QNAN(src1)
+ FI
+ ELSE
+ tmp_src2 := src2
+ tmp_src1 := src1
+ IF (IS_DENORMAL(src2) AND MXCSR.DAZ)
+ tmp_src2 := 0
+ FI
+ IF (IS_DENORMAL(src1) AND MXCSR.DAZ)
+ tmp_src1 := 0
+ FI
+ FI
+ dst[63:0] := tmp_src1[63:0] * POW(2.0, FLOOR(tmp_src2[63:0]))
+ RETURN dst[63:0]
+}
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+    dst[i+63:i] := SCALE(a[i+63:i], b[i+63:i])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VSCALEFPD" form="xmm {k}, xmm, xmm" xed="VSCALEFPD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_scalef_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Scale the packed double-precision (64-bit) floating-point elements in "a" using values from "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>DEFINE SCALE(src1, src2) {
+ IF (src2 == NaN)
+ IF (src2 == SNaN)
+ RETURN QNAN(src2)
+ FI
+ ELSE IF (src1 == NaN)
+ IF (src1 == SNaN)
+ RETURN QNAN(src1)
+ FI
+ IF (src2 != INF)
+ RETURN QNAN(src1)
+ FI
+ ELSE
+ tmp_src2 := src2
+ tmp_src1 := src1
+ IF (IS_DENORMAL(src2) AND MXCSR.DAZ)
+ tmp_src2 := 0
+ FI
+ IF (IS_DENORMAL(src1) AND MXCSR.DAZ)
+ tmp_src1 := 0
+ FI
+ FI
+ dst[63:0] := tmp_src1[63:0] * POW(2.0, FLOOR(tmp_src2[63:0]))
+ RETURN dst[63:0]
+}
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+    dst[i+63:i] := SCALE(a[i+63:i], b[i+63:i])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VSCALEFPD" form="xmm {z}, xmm, xmm" xed="VSCALEFPD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_scalef_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Scale the packed double-precision (64-bit) floating-point elements in "a" using values from "b", and store the results in "dst".</description>
+ <operation>DEFINE SCALE(src1, src2) {
+ IF (src2 == NaN)
+ IF (src2 == SNaN)
+ RETURN QNAN(src2)
+ FI
+ ELSE IF (src1 == NaN)
+ IF (src1 == SNaN)
+ RETURN QNAN(src1)
+ FI
+ IF (src2 != INF)
+ RETURN QNAN(src1)
+ FI
+ ELSE
+ tmp_src2 := src2
+ tmp_src1 := src1
+ IF (IS_DENORMAL(src2) AND MXCSR.DAZ)
+ tmp_src2 := 0
+ FI
+ IF (IS_DENORMAL(src1) AND MXCSR.DAZ)
+ tmp_src1 := 0
+ FI
+ FI
+ dst[63:0] := tmp_src1[63:0] * POW(2.0, FLOOR(tmp_src2[63:0]))
+ RETURN dst[63:0]
+}
+FOR j := 0 to 1
+ i := j*64
+  dst[i+63:i] := SCALE(a[i+63:i], b[i+63:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VSCALEFPD" form="xmm, xmm, xmm" xed="VSCALEFPD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_scalef_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="__m256" varname="b" etype="FP32"/>
+ <description>Scale the packed single-precision (32-bit) floating-point elements in "a" using values from "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>DEFINE SCALE(src1, src2) {
+ IF (src2 == NaN)
+ IF (src2 == SNaN)
+ RETURN QNAN(src2)
+ FI
+ ELSE IF (src1 == NaN)
+ IF (src1 == SNaN)
+ RETURN QNAN(src1)
+ FI
+ IF (src2 != INF)
+ RETURN QNAN(src1)
+ FI
+ ELSE
+ tmp_src2 := src2
+ tmp_src1 := src1
+ IF (IS_DENORMAL(src2) AND MXCSR.DAZ)
+ tmp_src2 := 0
+ FI
+ IF (IS_DENORMAL(src1) AND MXCSR.DAZ)
+ tmp_src1 := 0
+ FI
+ FI
+ dst[31:0] := tmp_src1[31:0] * POW(2.0, FLOOR(tmp_src2[31:0]))
+ RETURN dst[31:0]
+}
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+    dst[i+31:i] := SCALE(a[i+31:i], b[i+31:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VSCALEFPS" form="ymm {k}, ymm, ymm" xed="VSCALEFPS_YMMf32_MASKmskw_YMMf32_YMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_scalef_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="__m256" varname="b" etype="FP32"/>
+ <description>Scale the packed single-precision (32-bit) floating-point elements in "a" using values from "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>DEFINE SCALE(src1, src2) {
+ IF (src2 == NaN)
+ IF (src2 == SNaN)
+ RETURN QNAN(src2)
+ FI
+ ELSE IF (src1 == NaN)
+ IF (src1 == SNaN)
+ RETURN QNAN(src1)
+ FI
+ IF (src2 != INF)
+ RETURN QNAN(src1)
+ FI
+ ELSE
+ tmp_src2 := src2
+ tmp_src1 := src1
+ IF (IS_DENORMAL(src2) AND MXCSR.DAZ)
+ tmp_src2 := 0
+ FI
+ IF (IS_DENORMAL(src1) AND MXCSR.DAZ)
+ tmp_src1 := 0
+ FI
+ FI
+ dst[31:0] := tmp_src1[31:0] * POW(2.0, FLOOR(tmp_src2[31:0]))
+ RETURN dst[31:0]
+}
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+    dst[i+31:i] := SCALE(a[i+31:i], b[i+31:i])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VSCALEFPS" form="ymm {z}, ymm, ymm" xed="VSCALEFPS_YMMf32_MASKmskw_YMMf32_YMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_scalef_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="__m256" varname="b" etype="FP32"/>
+ <description>Scale the packed single-precision (32-bit) floating-point elements in "a" using values from "b", and store the results in "dst".</description>
+ <operation>DEFINE SCALE(src1, src2) {
+ IF (src2 == NaN)
+ IF (src2 == SNaN)
+ RETURN QNAN(src2)
+ FI
+ ELSE IF (src1 == NaN)
+ IF (src1 == SNaN)
+ RETURN QNAN(src1)
+ FI
+ IF (src2 != INF)
+ RETURN QNAN(src1)
+ FI
+ ELSE
+ tmp_src2 := src2
+ tmp_src1 := src1
+ IF (IS_DENORMAL(src2) AND MXCSR.DAZ)
+ tmp_src2 := 0
+ FI
+ IF (IS_DENORMAL(src1) AND MXCSR.DAZ)
+ tmp_src1 := 0
+ FI
+ FI
+ dst[31:0] := tmp_src1[31:0] * POW(2.0, FLOOR(tmp_src2[31:0]))
+ RETURN dst[31:0]
+}
+FOR j := 0 to 7
+ i := j*32
+  dst[i+31:i] := SCALE(a[i+31:i], b[i+31:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VSCALEFPS" form="ymm, ymm, ymm" xed="VSCALEFPS_YMMf32_MASKmskw_YMMf32_YMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_scalef_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Scale the packed single-precision (32-bit) floating-point elements in "a" using values from "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>DEFINE SCALE(src1, src2) {
+ IF (src2 == NaN)
+ IF (src2 == SNaN)
+ RETURN QNAN(src2)
+ FI
+ ELSE IF (src1 == NaN)
+ IF (src1 == SNaN)
+ RETURN QNAN(src1)
+ FI
+ IF (src2 != INF)
+ RETURN QNAN(src1)
+ FI
+ ELSE
+ tmp_src2 := src2
+ tmp_src1 := src1
+ IF (IS_DENORMAL(src2) AND MXCSR.DAZ)
+ tmp_src2 := 0
+ FI
+ IF (IS_DENORMAL(src1) AND MXCSR.DAZ)
+ tmp_src1 := 0
+ FI
+ FI
+ dst[31:0] := tmp_src1[31:0] * POW(2.0, FLOOR(tmp_src2[31:0]))
+ RETURN dst[31:0]
+}
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+    dst[i+31:i] := SCALE(a[i+31:i], b[i+31:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VSCALEFPS" form="xmm {k}, xmm, xmm" xed="VSCALEFPS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_scalef_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Scale the packed single-precision (32-bit) floating-point elements in "a" using values from "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>DEFINE SCALE(src1, src2) {
+ IF (src2 == NaN)
+ IF (src2 == SNaN)
+ RETURN QNAN(src2)
+ FI
+ ELSE IF (src1 == NaN)
+ IF (src1 == SNaN)
+ RETURN QNAN(src1)
+ FI
+ IF (src2 != INF)
+ RETURN QNAN(src1)
+ FI
+ ELSE
+ tmp_src2 := src2
+ tmp_src1 := src1
+ IF (IS_DENORMAL(src2) AND MXCSR.DAZ)
+ tmp_src2 := 0
+ FI
+ IF (IS_DENORMAL(src1) AND MXCSR.DAZ)
+ tmp_src1 := 0
+ FI
+ FI
+ dst[31:0] := tmp_src1[31:0] * POW(2.0, FLOOR(tmp_src2[31:0]))
+ RETURN dst[31:0]
+}
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+    dst[i+31:i] := SCALE(a[i+31:i], b[i+31:i])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VSCALEFPS" form="xmm {z}, xmm, xmm" xed="VSCALEFPS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_scalef_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Scale the packed single-precision (32-bit) floating-point elements in "a" using values from "b", and store the results in "dst".</description>
+ <operation>DEFINE SCALE(src1, src2) {
+ IF (src2 == NaN)
+ IF (src2 == SNaN)
+ RETURN QNAN(src2)
+ FI
+ ELSE IF (src1 == NaN)
+ IF (src1 == SNaN)
+ RETURN QNAN(src1)
+ FI
+ IF (src2 != INF)
+ RETURN QNAN(src1)
+ FI
+ ELSE
+ tmp_src2 := src2
+ tmp_src1 := src1
+ IF (IS_DENORMAL(src2) AND MXCSR.DAZ)
+ tmp_src2 := 0
+ FI
+ IF (IS_DENORMAL(src1) AND MXCSR.DAZ)
+ tmp_src1 := 0
+ FI
+ FI
+ dst[31:0] := tmp_src1[31:0] * POW(2.0, FLOOR(tmp_src2[31:0]))
+ RETURN dst[31:0]
+}
+FOR j := 0 to 3
+ i := j*32
+  dst[i+31:i] := SCALE(a[i+31:i], b[i+31:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VSCALEFPS" form="xmm, xmm, xmm" xed="VSCALEFPS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_i32scatter_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="base_addr" etype="FP64"/>
+ <parameter type="__m128i" varname="vindex" etype="SI32"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <parameter type="const int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <description>Scatter double-precision (64-bit) floating-point elements from "a" into memory using 32-bit indices. 64-bit elements are stored at addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). "scale" should be 1, 2, 4 or 8.</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ m := j*32
+ addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
+ MEM[addr+63:addr] := a[i+63:i]
+ENDFOR
+ </operation>
+ <instruction name="VSCATTERDPD" form="vm32x, ymm" xed="VSCATTERDPD_MEMf64_MASKmskw_YMMf64_AVX512_VL256"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_i32scatter_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="base_addr" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="vindex" etype="SI32"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <parameter type="const int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <description>Scatter double-precision (64-bit) floating-point elements from "a" into memory using 32-bit indices. 64-bit elements are stored at addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale") subject to mask "k" (elements are not stored when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8.</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ m := j*32
+ IF k[j]
+ addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
+ MEM[addr+63:addr] := a[i+63:i]
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VSCATTERDPD" form="vm32x {k}, ymm" xed="VSCATTERDPD_MEMf64_MASKmskw_YMMf64_AVX512_VL256"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_i32scatter_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="base_addr" etype="FP64"/>
+ <parameter type="__m128i" varname="vindex" etype="SI32"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="const int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <description>Scatter double-precision (64-bit) floating-point elements from "a" into memory using 32-bit indices. 64-bit elements are stored at addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). "scale" should be 1, 2, 4 or 8.</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ m := j*32
+ addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
+ MEM[addr+63:addr] := a[i+63:i]
+ENDFOR
+ </operation>
+ <instruction name="VSCATTERDPD" form="vm32x, xmm" xed="VSCATTERDPD_MEMf64_MASKmskw_XMMf64_AVX512_VL128"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_i32scatter_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="base_addr" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="vindex" etype="SI32"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="const int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <description>Scatter double-precision (64-bit) floating-point elements from "a" into memory using 32-bit indices. 64-bit elements are stored at addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale") subject to mask "k" (elements are not stored when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8.</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ m := j*32
+ IF k[j]
+ addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
+ MEM[addr+63:addr] := a[i+63:i]
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VSCATTERDPD" form="vm32x {k}, xmm" xed="VSCATTERDPD_MEMf64_MASKmskw_XMMf64_AVX512_VL128"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_i32scatter_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="base_addr" etype="FP32"/>
+ <parameter type="__m256i" varname="vindex" etype="SI32"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="const int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <description>Scatter single-precision (32-bit) floating-point elements from "a" into memory using 32-bit indices. 32-bit elements are stored at addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). "scale" should be 1, 2, 4 or 8.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ m := j*32
+  addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 4
+ MEM[addr+31:addr] := a[i+31:i]
+ENDFOR
+ </operation>
+ <instruction name="VSCATTERDPS" form="vm32y, ymm" xed="VSCATTERDPS_MEMf32_MASKmskw_YMMf32_AVX512_VL256"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_i32scatter_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="base_addr" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="vindex" etype="SI32"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="const int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <description>Scatter single-precision (32-bit) floating-point elements from "a" into memory using 32-bit indices. 32-bit elements are stored at addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale") subject to mask "k" (elements are not stored when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ m := j*32
+ IF k[j]
+    addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 4
+ MEM[addr+31:addr] := a[i+31:i]
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VSCATTERDPS" form="vm32y {k}, ymm" xed="VSCATTERDPS_MEMf32_MASKmskw_YMMf32_AVX512_VL256"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_i32scatter_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="base_addr" etype="FP32"/>
+ <parameter type="__m128i" varname="vindex" etype="SI32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="const int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <description>Scatter single-precision (32-bit) floating-point elements from "a" into memory using 32-bit indices. 32-bit elements are stored at addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). "scale" should be 1, 2, 4 or 8.</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ m := j*32
+  addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 4
+ MEM[addr+31:addr] := a[i+31:i]
+ENDFOR
+ </operation>
+ <instruction name="VSCATTERDPS" form="vm32x, xmm" xed="VSCATTERDPS_MEMf32_MASKmskw_XMMf32_AVX512_VL128"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_i32scatter_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="base_addr" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="vindex" etype="SI32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="const int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <description>Scatter single-precision (32-bit) floating-point elements from "a" into memory using 32-bit indices. 32-bit elements are stored at addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale") subject to mask "k" (elements are not stored when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8.</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ m := j*32
+ IF k[j]
+    addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 4
+ MEM[addr+31:addr] := a[i+31:i]
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VSCATTERDPS" form="vm32x {k}, xmm" xed="VSCATTERDPS_MEMf32_MASKmskw_XMMf32_AVX512_VL128"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_i64scatter_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="base_addr" etype="FP64"/>
+ <parameter type="__m256i" varname="vindex" etype="SI64"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <parameter type="const int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <description>Scatter double-precision (64-bit) floating-point elements from "a" into memory using 64-bit indices. 64-bit elements are stored at addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). "scale" should be 1, 2, 4 or 8.</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ m := j*64
+ addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8
+ MEM[addr+63:addr] := a[i+63:i]
+ENDFOR
+ </operation>
+ <instruction name="VSCATTERQPD" form="vm64y, ymm" xed="VSCATTERQPD_MEMf64_MASKmskw_YMMf64_AVX512_VL256"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_i64scatter_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="base_addr" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="vindex" etype="SI64"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <parameter type="const int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <description>Scatter double-precision (64-bit) floating-point elements from "a" into memory using 64-bit indices. 64-bit elements are stored at addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale") subject to mask "k" (elements are not stored when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8.</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ m := j*64
+ IF k[j]
+ addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8
+ MEM[addr+63:addr] := a[i+63:i]
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VSCATTERQPD" form="vm64y {k}, ymm" xed="VSCATTERQPD_MEMf64_MASKmskw_YMMf64_AVX512_VL256"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_i64scatter_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="base_addr" etype="FP64"/>
+ <parameter type="__m128i" varname="vindex" etype="SI64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="const int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <description>Scatter double-precision (64-bit) floating-point elements from "a" into memory using 64-bit indices. 64-bit elements are stored at addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). "scale" should be 1, 2, 4 or 8.</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ m := j*64
+ addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8
+ MEM[addr+63:addr] := a[i+63:i]
+ENDFOR
+ </operation>
+ <instruction name="VSCATTERQPD" form="vm64x, xmm" xed="VSCATTERQPD_MEMf64_MASKmskw_XMMf64_AVX512_VL128"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_i64scatter_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="base_addr" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="vindex" etype="SI64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="const int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <description>Scatter double-precision (64-bit) floating-point elements from "a" into memory using 64-bit indices. 64-bit elements are stored at addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale") subject to mask "k" (elements are not stored when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8.</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ m := j*64
+ IF k[j]
+ addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8
+ MEM[addr+63:addr] := a[i+63:i]
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VSCATTERQPD" form="vm64x {k}, xmm" xed="VSCATTERQPD_MEMf64_MASKmskw_XMMf64_AVX512_VL128"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_i64scatter_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="base_addr" etype="FP32"/>
+ <parameter type="__m256i" varname="vindex" etype="SI64"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="const int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <description>Scatter single-precision (32-bit) floating-point elements from "a" into memory using 64-bit indices. 32-bit elements are stored at addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). "scale" should be 1, 2, 4 or 8.</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ m := j*64
+ addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8
+ MEM[addr+31:addr] := a[i+31:i]
+ENDFOR
+ </operation>
+ <instruction name="VSCATTERQPS" form="vm64y, xmm" xed="VSCATTERQPS_MEMf32_MASKmskw_XMMf32_AVX512_VL128"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_i64scatter_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="base_addr" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="vindex" etype="SI64"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="const int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <description>Scatter single-precision (32-bit) floating-point elements from "a" into memory using 64-bit indices. 32-bit elements are stored at addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale") subject to mask "k" (elements are not stored when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8.</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ m := j*64
+ IF k[j]
+ addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8
+ MEM[addr+31:addr] := a[i+31:i]
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VSCATTERQPS" form="vm64y {k}, xmm" xed="VSCATTERQPS_MEMf32_MASKmskw_XMMf32_AVX512_VL128"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_i64scatter_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="base_addr" etype="FP32"/>
+ <parameter type="__m128i" varname="vindex" etype="SI64"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="const int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <description>Scatter single-precision (32-bit) floating-point elements from "a" into memory using 64-bit indices. 32-bit elements are stored at addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). "scale" should be 1, 2, 4 or 8.</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*32
+ m := j*64
+ addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8
+ MEM[addr+31:addr] := a[i+31:i]
+ENDFOR
+ </operation>
+ <instruction name="VSCATTERQPS" form="vm64x, xmm" xed="VSCATTERQPS_MEMf32_MASKmskw_XMMf32_AVX512_VL128"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_i64scatter_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="base_addr" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="vindex" etype="SI64"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="const int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <description>Scatter single-precision (32-bit) floating-point elements from "a" into memory using 64-bit indices. 32-bit elements are stored at addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale") subject to mask "k" (elements are not stored when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8.</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*32
+ m := j*64
+ IF k[j]
+ addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8
+ MEM[addr+31:addr] := a[i+31:i]
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VSCATTERQPS" form="vm64x {k}, xmm" xed="VSCATTERQPS_MEMf32_MASKmskw_XMMf32_AVX512_VL128"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_shuffle_f32x4">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="__m256" varname="b" etype="FP32"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="2"/>
+ <description>Shuffle 128-bits (composed of 4 single-precision (32-bit) floating-point elements) selected by "imm8" from "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+tmp_dst.m128[0] := a.m128[imm8[0]]
+tmp_dst.m128[1] := b.m128[imm8[1]]
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := tmp_dst[i+31:i]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VSHUFF32X4" form="ymm {k}, ymm, ymm, imm8" xed="VSHUFF32X4_YMMf32_MASKmskw_YMMf32_YMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_shuffle_f32x4">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="__m256" varname="b" etype="FP32"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="2"/>
+ <description>Shuffle 128-bits (composed of 4 single-precision (32-bit) floating-point elements) selected by "imm8" from "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+tmp_dst.m128[0] := a.m128[imm8[0]]
+tmp_dst.m128[1] := b.m128[imm8[1]]
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := tmp_dst[i+31:i]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VSHUFF32X4" form="ymm {z}, ymm, ymm, imm8" xed="VSHUFF32X4_YMMf32_MASKmskw_YMMf32_YMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_shuffle_f32x4">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="__m256" varname="b" etype="FP32"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="2"/>
+ <description>Shuffle 128-bits (composed of 4 single-precision (32-bit) floating-point elements) selected by "imm8" from "a" and "b", and store the results in "dst".</description>
+ <operation>
+dst.m128[0] := a.m128[imm8[0]]
+dst.m128[1] := b.m128[imm8[1]]
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VSHUFF32X4" form="ymm, ymm, ymm, imm8" xed="VSHUFF32X4_YMMf32_MASKmskw_YMMf32_YMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_shuffle_f64x2">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <parameter type="__m256d" varname="b" etype="FP64"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="2"/>
+ <description>Shuffle 128-bits (composed of 2 double-precision (64-bit) floating-point elements) selected by "imm8" from "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+tmp_dst.m128[0] := a.m128[imm8[0]]
+tmp_dst.m128[1] := b.m128[imm8[1]]
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := tmp_dst[i+63:i]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VSHUFF64X2" form="ymm {k}, ymm, ymm, imm8" xed="VSHUFF64X2_YMMf64_MASKmskw_YMMf64_YMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_shuffle_f64x2">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <parameter type="__m256d" varname="b" etype="FP64"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="2"/>
+ <description>Shuffle 128-bits (composed of 2 double-precision (64-bit) floating-point elements) selected by "imm8" from "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+tmp_dst.m128[0] := a.m128[imm8[0]]
+tmp_dst.m128[1] := b.m128[imm8[1]]
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := tmp_dst[i+63:i]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VSHUFF64X2" form="ymm {z}, ymm, ymm, imm8" xed="VSHUFF64X2_YMMf64_MASKmskw_YMMf64_YMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_shuffle_f64x2">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <parameter type="__m256d" varname="b" etype="FP64"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="2"/>
+ <description>Shuffle 128-bits (composed of 2 double-precision (64-bit) floating-point elements) selected by "imm8" from "a" and "b", and store the results in "dst".</description>
+ <operation>
+dst.m128[0] := a.m128[imm8[0]]
+dst.m128[1] := b.m128[imm8[1]]
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VSHUFF64X2" form="ymm, ymm, ymm, imm8" xed="VSHUFF64X2_YMMf64_MASKmskw_YMMf64_YMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_shuffle_i32x4">
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="__m256i" varname="b" etype="UI32"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="2"/>
+ <description>Shuffle 128-bits (composed of 4 32-bit integers) selected by "imm8" from "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+tmp_dst.m128[0] := a.m128[imm8[0]]
+tmp_dst.m128[1] := b.m128[imm8[1]]
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := tmp_dst[i+31:i]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VSHUFI32X4" form="ymm {k}, ymm, ymm, imm8" xed="VSHUFI32X4_YMMu32_MASKmskw_YMMu32_YMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_shuffle_i32x4">
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="__m256i" varname="b" etype="UI32"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="2"/>
+ <description>Shuffle 128-bits (composed of 4 32-bit integers) selected by "imm8" from "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+tmp_dst.m128[0] := a.m128[imm8[0]]
+tmp_dst.m128[1] := b.m128[imm8[1]]
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := tmp_dst[i+31:i]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VSHUFI32X4" form="ymm {z}, ymm, ymm, imm8" xed="VSHUFI32X4_YMMu32_MASKmskw_YMMu32_YMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_shuffle_i32x4">
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="__m256i" varname="b" etype="UI32"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="2"/>
+ <description>Shuffle 128-bits (composed of 4 32-bit integers) selected by "imm8" from "a" and "b", and store the results in "dst".</description>
+ <operation>
+dst.m128[0] := a.m128[imm8[0]]
+dst.m128[1] := b.m128[imm8[1]]
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VSHUFI32X4" form="ymm, ymm, ymm, imm8" xed="VSHUFI32X4_YMMu32_MASKmskw_YMMu32_YMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_shuffle_i64x2">
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m256i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="__m256i" varname="b" etype="UI64"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="2"/>
+ <description>Shuffle 128-bits (composed of 2 64-bit integers) selected by "imm8" from "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+tmp_dst.m128[0] := a.m128[imm8[0]]
+tmp_dst.m128[1] := b.m128[imm8[1]]
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := tmp_dst[i+63:i]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VSHUFI64X2" form="ymm {k}, ymm, ymm, imm8" xed="VSHUFI64X2_YMMu64_MASKmskw_YMMu64_YMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_shuffle_i64x2">
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="__m256i" varname="b" etype="UI64"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="2"/>
+ <description>Shuffle 128-bits (composed of 2 64-bit integers) selected by "imm8" from "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+tmp_dst.m128[0] := a.m128[imm8[0]]
+tmp_dst.m128[1] := b.m128[imm8[1]]
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := tmp_dst[i+63:i]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VSHUFI64X2" form="ymm {z}, ymm, ymm, imm8" xed="VSHUFI64X2_YMMu64_MASKmskw_YMMu64_YMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_shuffle_i64x2">
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="__m256i" varname="b" etype="UI64"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="2"/>
+ <description>Shuffle 128-bits (composed of 2 64-bit integers) selected by "imm8" from "a" and "b", and store the results in "dst".</description>
+ <operation>
+dst.m128[0] := a.m128[imm8[0]]
+dst.m128[1] := b.m128[imm8[1]]
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VSHUFI64X2" form="ymm, ymm, ymm, imm8" xed="VSHUFI64X2_YMMu64_MASKmskw_YMMu64_YMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_shuffle_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <parameter type="__m256d" varname="b" etype="FP64"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="4"/>
+ <description>Shuffle double-precision (64-bit) floating-point elements within 128-bit lanes using the control in "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+tmp_dst[63:0] := (imm8[0] == 0) ? a[63:0] : a[127:64]
+tmp_dst[127:64] := (imm8[1] == 0) ? b[63:0] : b[127:64]
+tmp_dst[191:128] := (imm8[2] == 0) ? a[191:128] : a[255:192]
+tmp_dst[255:192] := (imm8[3] == 0) ? b[191:128] : b[255:192]
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := tmp_dst[i+63:i]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VSHUFPD" form="ymm {k}, ymm, ymm, imm8" xed="VSHUFPD_YMMf64_MASKmskw_YMMf64_YMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_shuffle_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <parameter type="__m256d" varname="b" etype="FP64"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="4"/>
+ <description>Shuffle double-precision (64-bit) floating-point elements within 128-bit lanes using the control in "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+tmp_dst[63:0] := (imm8[0] == 0) ? a[63:0] : a[127:64]
+tmp_dst[127:64] := (imm8[1] == 0) ? b[63:0] : b[127:64]
+tmp_dst[191:128] := (imm8[2] == 0) ? a[191:128] : a[255:192]
+tmp_dst[255:192] := (imm8[3] == 0) ? b[191:128] : b[255:192]
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := tmp_dst[i+63:i]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VSHUFPD" form="ymm {z}, ymm, ymm, imm8" xed="VSHUFPD_YMMf64_MASKmskw_YMMf64_YMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_shuffle_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="2"/>
+ <description>Shuffle double-precision (64-bit) floating-point elements using the control in "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+tmp_dst[63:0] := (imm8[0] == 0) ? a[63:0] : a[127:64]
+tmp_dst[127:64] := (imm8[1] == 0) ? b[63:0] : b[127:64]
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := tmp_dst[i+63:i]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VSHUFPD" form="xmm {k}, xmm, xmm, imm8" xed="VSHUFPD_XMMf64_MASKmskw_XMMf64_XMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_shuffle_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="2"/>
+ <description>Shuffle double-precision (64-bit) floating-point elements using the control in "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+tmp_dst[63:0] := (imm8[0] == 0) ? a[63:0] : a[127:64]
+tmp_dst[127:64] := (imm8[1] == 0) ? b[63:0] : b[127:64]
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := tmp_dst[i+63:i]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VSHUFPD" form="xmm {z}, xmm, xmm, imm8" xed="VSHUFPD_XMMf64_MASKmskw_XMMf64_XMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_shuffle_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="__m256" varname="b" etype="FP32"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shuffle single-precision (32-bit) floating-point elements in "a" within 128-bit lanes using the control in "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE SELECT4(src, control) {
+ CASE(control[1:0]) OF
+ 0: tmp[31:0] := src[31:0]
+ 1: tmp[31:0] := src[63:32]
+ 2: tmp[31:0] := src[95:64]
+ 3: tmp[31:0] := src[127:96]
+ ESAC
+ RETURN tmp[31:0]
+}
+tmp_dst[31:0] := SELECT4(a[127:0], imm8[1:0])
+tmp_dst[63:32] := SELECT4(a[127:0], imm8[3:2])
+tmp_dst[95:64] := SELECT4(b[127:0], imm8[5:4])
+tmp_dst[127:96] := SELECT4(b[127:0], imm8[7:6])
+tmp_dst[159:128] := SELECT4(a[255:128], imm8[1:0])
+tmp_dst[191:160] := SELECT4(a[255:128], imm8[3:2])
+tmp_dst[223:192] := SELECT4(b[255:128], imm8[5:4])
+tmp_dst[255:224] := SELECT4(b[255:128], imm8[7:6])
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := tmp_dst[i+31:i]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VSHUFPS" form="ymm {k}, ymm, ymm, imm8" xed="VSHUFPS_YMMf32_MASKmskw_YMMf32_YMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_shuffle_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="__m256" varname="b" etype="FP32"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shuffle single-precision (32-bit) floating-point elements in "a" within 128-bit lanes using the control in "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE SELECT4(src, control) {
+ CASE(control[1:0]) OF
+ 0: tmp[31:0] := src[31:0]
+ 1: tmp[31:0] := src[63:32]
+ 2: tmp[31:0] := src[95:64]
+ 3: tmp[31:0] := src[127:96]
+ ESAC
+ RETURN tmp[31:0]
+}
+tmp_dst[31:0] := SELECT4(a[127:0], imm8[1:0])
+tmp_dst[63:32] := SELECT4(a[127:0], imm8[3:2])
+tmp_dst[95:64] := SELECT4(b[127:0], imm8[5:4])
+tmp_dst[127:96] := SELECT4(b[127:0], imm8[7:6])
+tmp_dst[159:128] := SELECT4(a[255:128], imm8[1:0])
+tmp_dst[191:160] := SELECT4(a[255:128], imm8[3:2])
+tmp_dst[223:192] := SELECT4(b[255:128], imm8[5:4])
+tmp_dst[255:224] := SELECT4(b[255:128], imm8[7:6])
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := tmp_dst[i+31:i]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VSHUFPS" form="ymm {z}, ymm, ymm, imm8" xed="VSHUFPS_YMMf32_MASKmskw_YMMf32_YMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_shuffle_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shuffle single-precision (32-bit) floating-point elements in "a" using the control in "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE SELECT4(src, control) {
+ CASE(control[1:0]) OF
+ 0: tmp[31:0] := src[31:0]
+ 1: tmp[31:0] := src[63:32]
+ 2: tmp[31:0] := src[95:64]
+ 3: tmp[31:0] := src[127:96]
+ ESAC
+ RETURN tmp[31:0]
+}
+tmp_dst[31:0] := SELECT4(a[127:0], imm8[1:0])
+tmp_dst[63:32] := SELECT4(a[127:0], imm8[3:2])
+tmp_dst[95:64] := SELECT4(b[127:0], imm8[5:4])
+tmp_dst[127:96] := SELECT4(b[127:0], imm8[7:6])
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := tmp_dst[i+31:i]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VSHUFPS" form="xmm {k}, xmm, xmm, imm8" xed="VSHUFPS_XMMf32_MASKmskw_XMMf32_XMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_shuffle_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shuffle single-precision (32-bit) floating-point elements in "a" using the control in "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE SELECT4(src, control) {
+ CASE(control[1:0]) OF
+ 0: tmp[31:0] := src[31:0]
+ 1: tmp[31:0] := src[63:32]
+ 2: tmp[31:0] := src[95:64]
+ 3: tmp[31:0] := src[127:96]
+ ESAC
+ RETURN tmp[31:0]
+}
+tmp_dst[31:0] := SELECT4(a[127:0], imm8[1:0])
+tmp_dst[63:32] := SELECT4(a[127:0], imm8[3:2])
+tmp_dst[95:64] := SELECT4(b[127:0], imm8[5:4])
+tmp_dst[127:96] := SELECT4(b[127:0], imm8[7:6])
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := tmp_dst[i+31:i]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VSHUFPS" form="xmm {z}, xmm, xmm, imm8" xed="VSHUFPS_XMMf32_MASKmskw_XMMf32_XMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_sqrt_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <description>Compute the square root of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := SQRT(a[i+63:i])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VSQRTPD" form="ymm {k}, ymm" xed="VSQRTPD_YMMf64_MASKmskw_YMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_sqrt_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <description>Compute the square root of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := SQRT(a[i+63:i])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VSQRTPD" form="ymm {z}, ymm" xed="VSQRTPD_YMMf64_MASKmskw_YMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_sqrt_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Compute the square root of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := SQRT(a[i+63:i])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VSQRTPD" form="xmm {k}, xmm" xed="VSQRTPD_XMMf64_MASKmskw_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_sqrt_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Compute the square root of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := SQRT(a[i+63:i])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VSQRTPD" form="xmm {z}, xmm" xed="VSQRTPD_XMMf64_MASKmskw_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_sqrt_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <description>Compute the square root of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := SQRT(a[i+31:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VSQRTPS" form="ymm {k}, ymm" xed="VSQRTPS_YMMf32_MASKmskw_YMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_sqrt_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <description>Compute the square root of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := SQRT(a[i+31:i])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VSQRTPS" form="ymm {z}, ymm" xed="VSQRTPS_YMMf32_MASKmskw_YMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_sqrt_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Compute the square root of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := SQRT(a[i+31:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VSQRTPS" form="xmm {k}, xmm" xed="VSQRTPS_XMMf32_MASKmskw_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_sqrt_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Compute the square root of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := SQRT(a[i+31:i])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VSQRTPS" form="xmm {z}, xmm" xed="VSQRTPS_XMMf32_MASKmskw_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_sub_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <parameter type="__m256d" varname="b" etype="FP64"/>
+ <description>Subtract packed double-precision (64-bit) floating-point elements in "b" from packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := a[i+63:i] - b[i+63:i]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VSUBPD" form="ymm {k}, ymm, ymm" xed="VSUBPD_YMMf64_MASKmskw_YMMf64_YMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_sub_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <parameter type="__m256d" varname="b" etype="FP64"/>
+ <description>Subtract packed double-precision (64-bit) floating-point elements in "b" from packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := a[i+63:i] - b[i+63:i]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VSUBPD" form="ymm {z}, ymm, ymm" xed="VSUBPD_YMMf64_MASKmskw_YMMf64_YMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_sub_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Subtract packed double-precision (64-bit) floating-point elements in "b" from packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := a[i+63:i] - b[i+63:i]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VSUBPD" form="xmm {k}, xmm, xmm" xed="VSUBPD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_sub_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Subtract packed double-precision (64-bit) floating-point elements in "b" from packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := a[i+63:i] - b[i+63:i]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VSUBPD" form="xmm {z}, xmm, xmm" xed="VSUBPD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_sub_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="__m256" varname="b" etype="FP32"/>
+ <description>Subtract packed single-precision (32-bit) floating-point elements in "b" from packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := a[i+31:i] - b[i+31:i]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VSUBPS" form="ymm {k}, ymm, ymm" xed="VSUBPS_YMMf32_MASKmskw_YMMf32_YMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_sub_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="__m256" varname="b" etype="FP32"/>
+ <description>Subtract packed single-precision (32-bit) floating-point elements in "b" from packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := a[i+31:i] - b[i+31:i]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VSUBPS" form="ymm {z}, ymm, ymm" xed="VSUBPS_YMMf32_MASKmskw_YMMf32_YMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_sub_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Subtract packed single-precision (32-bit) floating-point elements in "b" from packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := a[i+31:i] - b[i+31:i]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VSUBPS" form="xmm {k}, xmm, xmm" xed="VSUBPS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_sub_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Subtract packed single-precision (32-bit) floating-point elements in "b" from packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := a[i+31:i] - b[i+31:i]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VSUBPS" form="xmm {z}, xmm, xmm" xed="VSUBPS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_unpackhi_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <parameter type="__m256d" varname="b" etype="FP64"/>
+ <description>Unpack and interleave double-precision (64-bit) floating-point elements from the high half of each 128-bit lane in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE INTERLEAVE_HIGH_QWORDS(src1[127:0], src2[127:0]) {
+ dst[63:0] := src1[127:64]
+ dst[127:64] := src2[127:64]
+ RETURN dst[127:0]
+}
+tmp_dst[127:0] := INTERLEAVE_HIGH_QWORDS(a[127:0], b[127:0])
+tmp_dst[255:128] := INTERLEAVE_HIGH_QWORDS(a[255:128], b[255:128])
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := tmp_dst[i+63:i]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VUNPCKHPD" form="ymm {k}, ymm, ymm" xed="VUNPCKHPD_YMMf64_MASKmskw_YMMf64_YMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_unpackhi_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <parameter type="__m256d" varname="b" etype="FP64"/>
+ <description>Unpack and interleave double-precision (64-bit) floating-point elements from the high half of each 128-bit lane in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE INTERLEAVE_HIGH_QWORDS(src1[127:0], src2[127:0]) {
+ dst[63:0] := src1[127:64]
+ dst[127:64] := src2[127:64]
+ RETURN dst[127:0]
+}
+tmp_dst[127:0] := INTERLEAVE_HIGH_QWORDS(a[127:0], b[127:0])
+tmp_dst[255:128] := INTERLEAVE_HIGH_QWORDS(a[255:128], b[255:128])
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := tmp_dst[i+63:i]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VUNPCKHPD" form="ymm {z}, ymm, ymm" xed="VUNPCKHPD_YMMf64_MASKmskw_YMMf64_YMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_unpackhi_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Unpack and interleave double-precision (64-bit) floating-point elements from the high half of "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE INTERLEAVE_HIGH_QWORDS(src1[127:0], src2[127:0]) {
+ dst[63:0] := src1[127:64]
+ dst[127:64] := src2[127:64]
+ RETURN dst[127:0]
+}
+tmp_dst[127:0] := INTERLEAVE_HIGH_QWORDS(a[127:0], b[127:0])
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := tmp_dst[i+63:i]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VUNPCKHPD" form="xmm {k}, xmm, xmm" xed="VUNPCKHPD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_unpackhi_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Unpack and interleave double-precision (64-bit) floating-point elements from the high half of "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE INTERLEAVE_HIGH_QWORDS(src1[127:0], src2[127:0]) {
+ dst[63:0] := src1[127:64]
+ dst[127:64] := src2[127:64]
+ RETURN dst[127:0]
+}
+tmp_dst[127:0] := INTERLEAVE_HIGH_QWORDS(a[127:0], b[127:0])
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := tmp_dst[i+63:i]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VUNPCKHPD" form="xmm {z}, xmm, xmm" xed="VUNPCKHPD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_unpackhi_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="__m256" varname="b" etype="FP32"/>
+ <description>Unpack and interleave single-precision (32-bit) floating-point elements from the high half of each 128-bit lane in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE INTERLEAVE_HIGH_DWORDS(src1[127:0], src2[127:0]) {
+ dst[31:0] := src1[95:64]
+ dst[63:32] := src2[95:64]
+ dst[95:64] := src1[127:96]
+ dst[127:96] := src2[127:96]
+ RETURN dst[127:0]
+}
+tmp_dst[127:0] := INTERLEAVE_HIGH_DWORDS(a[127:0], b[127:0])
+tmp_dst[255:128] := INTERLEAVE_HIGH_DWORDS(a[255:128], b[255:128])
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := tmp_dst[i+31:i]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VUNPCKHPS" form="ymm {k}, ymm, ymm" xed="VUNPCKHPS_YMMf32_MASKmskw_YMMf32_YMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_unpackhi_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="__m256" varname="b" etype="FP32"/>
+ <description>Unpack and interleave single-precision (32-bit) floating-point elements from the high half of each 128-bit lane in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE INTERLEAVE_HIGH_DWORDS(src1[127:0], src2[127:0]) {
+ dst[31:0] := src1[95:64]
+ dst[63:32] := src2[95:64]
+ dst[95:64] := src1[127:96]
+ dst[127:96] := src2[127:96]
+ RETURN dst[127:0]
+}
+tmp_dst[127:0] := INTERLEAVE_HIGH_DWORDS(a[127:0], b[127:0])
+tmp_dst[255:128] := INTERLEAVE_HIGH_DWORDS(a[255:128], b[255:128])
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := tmp_dst[i+31:i]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VUNPCKHPS" form="ymm {z}, ymm, ymm" xed="VUNPCKHPS_YMMf32_MASKmskw_YMMf32_YMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_unpackhi_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Unpack and interleave single-precision (32-bit) floating-point elements from the high half of "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE INTERLEAVE_HIGH_DWORDS(src1[127:0], src2[127:0]) {
+ dst[31:0] := src1[95:64]
+ dst[63:32] := src2[95:64]
+ dst[95:64] := src1[127:96]
+ dst[127:96] := src2[127:96]
+ RETURN dst[127:0]
+}
+tmp_dst[127:0] := INTERLEAVE_HIGH_DWORDS(a[127:0], b[127:0])
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := tmp_dst[i+31:i]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VUNPCKHPS" form="xmm {k}, xmm, xmm" xed="VUNPCKHPS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_unpackhi_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Unpack and interleave single-precision (32-bit) floating-point elements from the high half of "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE INTERLEAVE_HIGH_DWORDS(src1[127:0], src2[127:0]) {
+ dst[31:0] := src1[95:64]
+ dst[63:32] := src2[95:64]
+ dst[95:64] := src1[127:96]
+ dst[127:96] := src2[127:96]
+ RETURN dst[127:0]
+}
+tmp_dst[127:0] := INTERLEAVE_HIGH_DWORDS(a[127:0], b[127:0])
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := tmp_dst[i+31:i]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VUNPCKHPS" form="xmm {z}, xmm, xmm" xed="VUNPCKHPS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_unpacklo_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <parameter type="__m256d" varname="b" etype="FP64"/>
+ <description>Unpack and interleave double-precision (64-bit) floating-point elements from the low half of each 128-bit lane in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE INTERLEAVE_QWORDS(src1[127:0], src2[127:0]) {
+ dst[63:0] := src1[63:0]
+ dst[127:64] := src2[63:0]
+ RETURN dst[127:0]
+}
+tmp_dst[127:0] := INTERLEAVE_QWORDS(a[127:0], b[127:0])
+tmp_dst[255:128] := INTERLEAVE_QWORDS(a[255:128], b[255:128])
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := tmp_dst[i+63:i]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VUNPCKLPD" form="ymm {k}, ymm, ymm" xed="VUNPCKLPD_YMMf64_MASKmskw_YMMf64_YMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_unpacklo_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <parameter type="__m256d" varname="b" etype="FP64"/>
+ <description>Unpack and interleave double-precision (64-bit) floating-point elements from the low half of each 128-bit lane in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE INTERLEAVE_QWORDS(src1[127:0], src2[127:0]) {
+ dst[63:0] := src1[63:0]
+ dst[127:64] := src2[63:0]
+ RETURN dst[127:0]
+}
+tmp_dst[127:0] := INTERLEAVE_QWORDS(a[127:0], b[127:0])
+tmp_dst[255:128] := INTERLEAVE_QWORDS(a[255:128], b[255:128])
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := tmp_dst[i+63:i]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VUNPCKLPD" form="ymm {z}, ymm, ymm" xed="VUNPCKLPD_YMMf64_MASKmskw_YMMf64_YMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_unpacklo_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Unpack and interleave double-precision (64-bit) floating-point elements from the low half of "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE INTERLEAVE_QWORDS(src1[127:0], src2[127:0]) {
+ dst[63:0] := src1[63:0]
+ dst[127:64] := src2[63:0]
+ RETURN dst[127:0]
+}
+tmp_dst[127:0] := INTERLEAVE_QWORDS(a[127:0], b[127:0])
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := tmp_dst[i+63:i]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VUNPCKLPD" form="xmm {k}, xmm, xmm" xed="VUNPCKLPD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_unpacklo_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Unpack and interleave double-precision (64-bit) floating-point elements from the low half of "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE INTERLEAVE_QWORDS(src1[127:0], src2[127:0]) {
+ dst[63:0] := src1[63:0]
+ dst[127:64] := src2[63:0]
+ RETURN dst[127:0]
+}
+tmp_dst[127:0] := INTERLEAVE_QWORDS(a[127:0], b[127:0])
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := tmp_dst[i+63:i]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VUNPCKLPD" form="xmm {z}, xmm, xmm" xed="VUNPCKLPD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_unpacklo_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="__m256" varname="b" etype="FP32"/>
+ <description>Unpack and interleave single-precision (32-bit) floating-point elements from the low half of each 128-bit lane in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE INTERLEAVE_DWORDS(src1[127:0], src2[127:0]) {
+ dst[31:0] := src1[31:0]
+ dst[63:32] := src2[31:0]
+ dst[95:64] := src1[63:32]
+ dst[127:96] := src2[63:32]
+ RETURN dst[127:0]
+}
+tmp_dst[127:0] := INTERLEAVE_DWORDS(a[127:0], b[127:0])
+tmp_dst[255:128] := INTERLEAVE_DWORDS(a[255:128], b[255:128])
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := tmp_dst[i+31:i]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VUNPCKLPS" form="ymm {k}, ymm, ymm" xed="VUNPCKLPS_YMMf32_MASKmskw_YMMf32_YMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_unpacklo_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="__m256" varname="b" etype="FP32"/>
+ <description>Unpack and interleave single-precision (32-bit) floating-point elements from the low half of each 128-bit lane in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE INTERLEAVE_DWORDS(src1[127:0], src2[127:0]) {
+ dst[31:0] := src1[31:0]
+ dst[63:32] := src2[31:0]
+ dst[95:64] := src1[63:32]
+ dst[127:96] := src2[63:32]
+ RETURN dst[127:0]
+}
+tmp_dst[127:0] := INTERLEAVE_DWORDS(a[127:0], b[127:0])
+tmp_dst[255:128] := INTERLEAVE_DWORDS(a[255:128], b[255:128])
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := tmp_dst[i+31:i]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VUNPCKLPS" form="ymm {z}, ymm, ymm" xed="VUNPCKLPS_YMMf32_MASKmskw_YMMf32_YMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_unpacklo_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Unpack and interleave single-precision (32-bit) floating-point elements from the low half of "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE INTERLEAVE_DWORDS(src1[127:0], src2[127:0]) {
+ dst[31:0] := src1[31:0]
+ dst[63:32] := src2[31:0]
+ dst[95:64] := src1[63:32]
+ dst[127:96] := src2[63:32]
+ RETURN dst[127:0]
+}
+tmp_dst[127:0] := INTERLEAVE_DWORDS(a[127:0], b[127:0])
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := tmp_dst[i+31:i]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VUNPCKLPS" form="xmm {k}, xmm, xmm" xed="VUNPCKLPS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_unpacklo_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Unpack and interleave single-precision (32-bit) floating-point elements from the low half of "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE INTERLEAVE_DWORDS(src1[127:0], src2[127:0]) {
+ dst[31:0] := src1[31:0]
+ dst[63:32] := src2[31:0]
+ dst[95:64] := src1[63:32]
+ dst[127:96] := src2[63:32]
+ RETURN dst[127:0]
+}
+tmp_dst[127:0] := INTERLEAVE_DWORDS(a[127:0], b[127:0])
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := tmp_dst[i+31:i]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VUNPCKLPS" form="xmm {z}, xmm, xmm" xed="VUNPCKLPS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_storeu_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="mem_addr" etype="UI64" memwidth="512"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <description>Store 512-bits (composed of 8 packed 64-bit integers) from "a" into memory.
+ "mem_addr" does not need to be aligned on any particular boundary.</description>
+ <operation>
+MEM[mem_addr+511:mem_addr] := a[511:0]
+ </operation>
+ <instruction name="VMOVDQU64" form="m512, zmm" xed="VMOVDQU64_MEMu64_MASKmskw_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_storeu_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="mem_addr" etype="UI32" memwidth="512"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <description>Store 512-bits (composed of 16 packed 32-bit integers) from "a" into memory.
+ "mem_addr" does not need to be aligned on any particular boundary.</description>
+ <operation>
+MEM[mem_addr+511:mem_addr] := a[511:0]
+ </operation>
+ <instruction name="VMOVDQU32" form="m512, zmm" xed="VMOVDQU32_MEMu32_MASKmskw_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_storeu_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="mem_addr" etype="UI64" memwidth="256"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <description>Store 256-bits (composed of 4 packed 64-bit integers) from "a" into memory.
+ "mem_addr" does not need to be aligned on any particular boundary.</description>
+ <operation>
+MEM[mem_addr+255:mem_addr] := a[255:0]
+ </operation>
+ <instruction name="VMOVDQU64" form="m256, ymm" xed="VMOVDQU64_MEMu64_MASKmskw_YMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_storeu_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="mem_addr" etype="UI32" memwidth="256"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <description>Store 256-bits (composed of 8 packed 32-bit integers) from "a" into memory.
+ "mem_addr" does not need to be aligned on any particular boundary.</description>
+ <operation>
+MEM[mem_addr+255:mem_addr] := a[255:0]
+ </operation>
+ <instruction name="VMOVDQU32" form="m256, ymm" xed="VMOVDQU32_MEMu32_MASKmskw_YMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_storeu_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="mem_addr" etype="UI64" memwidth="128"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <description>Store 128-bits (composed of 2 packed 64-bit integers) from "a" into memory.
+ "mem_addr" does not need to be aligned on any particular boundary.</description>
+ <operation>
+MEM[mem_addr+127:mem_addr] := a[127:0]
+ </operation>
+ <instruction name="VMOVDQU64" form="m128, xmm" xed="VMOVDQU64_MEMu64_MASKmskw_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_storeu_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="mem_addr" etype="UI32" memwidth="128"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <description>Store 128-bits (composed of 4 packed 32-bit integers) from "a" into memory.
+ "mem_addr" does not need to be aligned on any particular boundary.</description>
+ <operation>
+MEM[mem_addr+127:mem_addr] := a[127:0]
+ </operation>
+ <instruction name="VMOVDQU32" form="m128, xmm" xed="VMOVDQU32_MEMu32_MASKmskw_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_store_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="mem_addr" etype="UI64" memwidth="256"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <description>Store 256-bits (composed of 4 packed 64-bit integers) from "a" into memory.
+ "mem_addr" must be aligned on a 32-byte boundary or a general-protection exception may be generated.</description>
+ <operation>
+MEM[mem_addr+255:mem_addr] := a[255:0]
+ </operation>
+ <instruction name="VMOVDQA64" form="m256, ymm" xed="VMOVDQA64_MEMu64_MASKmskw_YMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_store_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="mem_addr" etype="UI32" memwidth="256"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <description>Store 256-bits (composed of 8 packed 32-bit integers) from "a" into memory.
+ "mem_addr" must be aligned on a 32-byte boundary or a general-protection exception may be generated.</description>
+ <operation>
+MEM[mem_addr+255:mem_addr] := a[255:0]
+ </operation>
+ <instruction name="VMOVDQA32" form="m256, ymm" xed="VMOVDQA32_MEMu32_MASKmskw_YMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_store_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="mem_addr" etype="UI64" memwidth="128"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <description>Store 128-bits (composed of 2 packed 64-bit integers) from "a" into memory.
+ "mem_addr" must be aligned on a 16-byte boundary or a general-protection exception may be generated.</description>
+ <operation>
+MEM[mem_addr+127:mem_addr] := a[127:0]
+ </operation>
+ <instruction name="VMOVDQA64" form="m128, xmm" xed="VMOVDQA64_MEMu64_MASKmskw_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_store_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="mem_addr" etype="UI32" memwidth="128"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <description>Store 128-bits (composed of 4 packed 32-bit integers) from "a" into memory.
+ "mem_addr" must be aligned on a 16-byte boundary or a general-protection exception may be generated.</description>
+ <operation>
+MEM[mem_addr+127:mem_addr] := a[127:0]
+ </operation>
+ <instruction name="VMOVDQA32" form="m128, xmm" xed="VMOVDQA32_MEMu32_MASKmskw_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_loadu_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Load</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="void const*" varname="mem_addr" etype="UI64" memwidth="512"/>
+ <description>Load 512-bits (composed of 8 packed 64-bit integers) from memory into "dst".
+ "mem_addr" does not need to be aligned on any particular boundary.</description>
+ <operation>
+dst[511:0] := MEM[mem_addr+511:mem_addr]
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VMOVDQU64" form="zmm, m512" xed="VMOVDQU64_ZMMu64_MASKmskw_MEMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_loadu_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Load</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="void const*" varname="mem_addr" etype="UI32" memwidth="512"/>
+ <description>Load 512-bits (composed of 16 packed 32-bit integers) from memory into "dst".
+ "mem_addr" does not need to be aligned on any particular boundary.</description>
+ <operation>
+dst[511:0] := MEM[mem_addr+511:mem_addr]
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VMOVDQU32" form="zmm, m512" xed="VMOVDQU32_ZMMu32_MASKmskw_MEMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_loadu_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Load</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="void const*" varname="mem_addr" etype="UI64" memwidth="256"/>
+ <description>Load 256-bits (composed of 4 packed 64-bit integers) from memory into "dst".
+ "mem_addr" does not need to be aligned on any particular boundary.</description>
+ <operation>
+dst[255:0] := MEM[mem_addr+255:mem_addr]
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VMOVDQU64" form="ymm, m256" xed="VMOVDQU64_YMMu64_MASKmskw_MEMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_loadu_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Load</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="void const*" varname="mem_addr" etype="UI32" memwidth="256"/>
+ <description>Load 256-bits (composed of 8 packed 32-bit integers) from memory into "dst".
+ "mem_addr" does not need to be aligned on any particular boundary.</description>
+ <operation>
+dst[255:0] := MEM[mem_addr+255:mem_addr]
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VMOVDQU32" form="ymm, m256" xed="VMOVDQU32_YMMu32_MASKmskw_MEMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_loadu_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Load</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="void const*" varname="mem_addr" etype="UI64" memwidth="128"/>
+ <description>Load 128-bits (composed of 2 packed 64-bit integers) from memory into "dst".
+ "mem_addr" does not need to be aligned on any particular boundary.</description>
+ <operation>
+dst[127:0] := MEM[mem_addr+127:mem_addr]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VMOVDQU64" form="xmm, m128" xed="VMOVDQU64_XMMu64_MASKmskw_MEMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_loadu_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Load</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="void const*" varname="mem_addr" etype="UI32" memwidth="128"/>
+ <description>Load 128-bits (composed of 4 packed 32-bit integers) from memory into "dst".
+ "mem_addr" does not need to be aligned on any particular boundary.</description>
+ <operation>
+dst[127:0] := MEM[mem_addr+127:mem_addr]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VMOVDQU32" form="xmm, m128" xed="VMOVDQU32_XMMu32_MASKmskw_MEMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_load_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Load</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="void const*" varname="mem_addr" etype="UI64" memwidth="256"/>
+ <description>Load 256-bits (composed of 4 packed 64-bit integers) from memory into "dst".
+ "mem_addr" must be aligned on a 32-byte boundary or a general-protection exception may be generated.</description>
+ <operation>
+dst[255:0] := MEM[mem_addr+255:mem_addr]
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VMOVDQA64" form="ymm, m256" xed="VMOVDQA64_YMMu64_MASKmskw_MEMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_load_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Load</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="void const*" varname="mem_addr" etype="UI32" memwidth="256"/>
+ <description>Load 256-bits (composed of 8 packed 32-bit integers) from memory into "dst".
+ "mem_addr" must be aligned on a 32-byte boundary or a general-protection exception may be generated.</description>
+ <operation>
+dst[255:0] := MEM[mem_addr+255:mem_addr]
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VMOVDQA32" form="ymm, m256" xed="VMOVDQA32_YMMu32_MASKmskw_MEMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_load_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Load</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="void const*" varname="mem_addr" etype="UI64" memwidth="128"/>
+ <description>Load 128-bits (composed of 2 packed 64-bit integers) from memory into "dst".
+ "mem_addr" must be aligned on a 16-byte boundary or a general-protection exception may be generated.</description>
+ <operation>
+dst[127:0] := MEM[mem_addr+127:mem_addr]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VMOVDQA64" form="xmm, m128" xed="VMOVDQA64_XMMu64_MASKmskw_MEMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_load_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Load</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="void const*" varname="mem_addr" etype="UI32" memwidth="128"/>
+ <description>Load 128-bits (composed of 4 packed 32-bit integers) from memory into "dst".
+ "mem_addr" must be aligned on a 16-byte boundary or a general-protection exception may be generated.</description>
+ <operation>
+dst[127:0] := MEM[mem_addr+127:mem_addr]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VMOVDQA32" form="xmm, m128" xed="VMOVDQA32_XMMu32_MASKmskw_MEMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_xor_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Logical</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="__m256i" varname="b" etype="UI64"/>
+ <description>Compute the bitwise XOR of packed 64-bit integers in "a" and "b", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ dst[i+63:i] := a[i+63:i] XOR b[i+63:i]
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPXORQ" form="ymm, ymm" xed="VPXORQ_YMMu64_MASKmskw_YMMu64_YMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_xor_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Logical</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="__m256i" varname="b" etype="UI32"/>
+ <description>Compute the bitwise XOR of packed 32-bit integers in "a" and "b", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ dst[i+31:i] := a[i+31:i] XOR b[i+31:i]
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPXORD" form="ymm, ymm" xed="VPXORD_YMMu32_MASKmskw_YMMu32_YMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_xor_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Logical</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <parameter type="__m128i" varname="b" etype="UI64"/>
+ <description>Compute the bitwise XOR of packed 64-bit integers in "a" and "b", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ dst[i+63:i] := a[i+63:i] XOR b[i+63:i]
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPXORQ" form="xmm, xmm" xed="VPXORQ_XMMu64_MASKmskw_XMMu64_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_xor_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Logical</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="b" etype="UI32"/>
+ <description>Compute the bitwise XOR of packed 32-bit integers in "a" and "b", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ dst[i+31:i] := a[i+31:i] XOR b[i+31:i]
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPXORD" form="xmm, xmm" xed="VPXORD_XMMu32_MASKmskw_XMMu32_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_or_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Logical</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="__m256i" varname="b" etype="UI64"/>
+ <description>Compute the bitwise OR of packed 64-bit integers in "a" and "b", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ dst[i+63:i] := a[i+63:i] OR b[i+63:i]
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPORQ" form="ymm, ymm" xed="VPORQ_YMMu64_MASKmskw_YMMu64_YMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_or_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Logical</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="__m256i" varname="b" etype="UI32"/>
+ <description>Compute the bitwise OR of packed 32-bit integers in "a" and "b", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ dst[i+31:i] := a[i+31:i] OR b[i+31:i]
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPORD" form="ymm, ymm" xed="VPORD_YMMu32_MASKmskw_YMMu32_YMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_or_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Logical</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <parameter type="__m128i" varname="b" etype="UI64"/>
+ <description>Compute the bitwise OR of packed 64-bit integers in "a" and "b", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ dst[i+63:i] := a[i+63:i] OR b[i+63:i]
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPORQ" form="xmm, xmm" xed="VPORQ_XMMu64_MASKmskw_XMMu64_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_or_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Logical</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="b" etype="UI32"/>
+ <description>Compute the bitwise OR of packed 32-bit integers in "a" and "b", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ dst[i+31:i] := a[i+31:i] OR b[i+31:i]
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPORD" form="xmm, xmm" xed="VPORD_XMMu32_MASKmskw_XMMu32_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_mm512_aesenclast_epi128">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <CPUID>VAES</CPUID>
+ <category>Cryptography</category>
+ <return type="__m512i" varname="dst" etype="M128"/>
+ <parameter type="__m512i" varname="a" etype="M128"/>
+ <parameter type="__m512i" varname="RoundKey" etype="M128"/>
+ <description>Perform the last round of an AES encryption flow on data (state) in "a" using the round key in "RoundKey", and store the results in "dst".</description>
+ <operation>FOR j := 0 to 3
+ i := j*128
+ a[i+127:i] := ShiftRows(a[i+127:i])
+ a[i+127:i] := SubBytes(a[i+127:i])
+ dst[i+127:i] := a[i+127:i] XOR RoundKey[i+127:i]
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VAESENCLAST" form="zmm, zmm" xed="VAESENCLAST_ZMMu128_ZMMu128_ZMMu128_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_mm512_aesenc_epi128">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <CPUID>VAES</CPUID>
+ <category>Cryptography</category>
+ <return type="__m512i" varname="dst" etype="M128"/>
+ <parameter type="__m512i" varname="a" etype="M128"/>
+ <parameter type="__m512i" varname="RoundKey" etype="M128"/>
+ <description>Perform one round of an AES encryption flow on data (state) in "a" using the round key in "RoundKey", and store the results in "dst".</description>
+ <operation>FOR j := 0 to 3
+ i := j*128
+ a[i+127:i] := ShiftRows(a[i+127:i])
+ a[i+127:i] := SubBytes(a[i+127:i])
+ a[i+127:i] := MixColumns(a[i+127:i])
+ dst[i+127:i] := a[i+127:i] XOR RoundKey[i+127:i]
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VAESENC" form="zmm, zmm" xed="VAESENC_ZMMu128_ZMMu128_ZMMu128_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_mm512_aesdeclast_epi128">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <CPUID>VAES</CPUID>
+ <category>Cryptography</category>
+ <return type="__m512i" varname="dst" etype="M128"/>
+ <parameter type="__m512i" varname="a" etype="M128"/>
+ <parameter type="__m512i" varname="RoundKey" etype="M128"/>
+ <description>Perform the last round of an AES decryption flow on data (state) in "a" using the round key in "RoundKey", and store the results in "dst".</description>
+ <operation>FOR j := 0 to 3
+ i := j*128
+ a[i+127:i] := InvShiftRows(a[i+127:i])
+ a[i+127:i] := InvSubBytes(a[i+127:i])
+ dst[i+127:i] := a[i+127:i] XOR RoundKey[i+127:i]
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VAESDECLAST" form="zmm, zmm" xed="VAESDECLAST_ZMMu128_ZMMu128_ZMMu128_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_mm512_aesdec_epi128">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <CPUID>VAES</CPUID>
+ <category>Cryptography</category>
+ <return type="__m512i" varname="dst" etype="M128"/>
+ <parameter type="__m512i" varname="a" etype="M128"/>
+ <parameter type="__m512i" varname="RoundKey" etype="M128"/>
+ <description>Perform one round of an AES decryption flow on data (state) in "a" using the round key in "RoundKey", and store the results in "dst".</description>
+ <operation>FOR j := 0 to 3
+ i := j*128
+ a[i+127:i] := InvShiftRows(a[i+127:i])
+ a[i+127:i] := InvSubBytes(a[i+127:i])
+ a[i+127:i] := InvMixColumns(a[i+127:i])
+ dst[i+127:i] := a[i+127:i] XOR RoundKey[i+127:i]
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VAESDEC" form="zmm, zmm" xed="VAESDEC_ZMMu128_ZMMu128_ZMMu128_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_kand_mask16">
+ <type>Mask</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Mask</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__mmask16" varname="a" etype="MASK"/>
+ <parameter type="__mmask16" varname="b" etype="MASK"/>
+ <description>Compute the bitwise AND of 16-bit masks "a" and "b", and store the result in "k".</description>
+ <operation>
+k[15:0] := a[15:0] AND b[15:0]
+k[MAX:16] := 0
+ </operation>
+ <instruction name="KANDW" form="k, k, k" xed="KANDW_MASKmskw_MASKmskw_MASKmskw_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_kandn_mask16">
+ <type>Mask</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Mask</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__mmask16" varname="a" etype="MASK"/>
+ <parameter type="__mmask16" varname="b" etype="MASK"/>
+ <description>Compute the bitwise NOT of 16-bit masks "a" and then AND with "b", and store the result in "k".</description>
+ <operation>
+k[15:0] := (NOT a[15:0]) AND b[15:0]
+k[MAX:16] := 0
+ </operation>
+ <instruction name="KANDNW" form="k, k, k" xed="KANDNW_MASKmskw_MASKmskw_MASKmskw_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_knot_mask16">
+ <type>Mask</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Mask</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__mmask16" varname="a" etype="MASK"/>
+ <description>Compute the bitwise NOT of 16-bit mask "a", and store the result in "k".</description>
+ <operation>
+k[15:0] := NOT a[15:0]
+k[MAX:16] := 0
+ </operation>
+ <instruction name="KNOTW" form="k, k" xed="KNOTW_MASKmskw_MASKmskw_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_kor_mask16">
+ <type>Mask</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Mask</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__mmask16" varname="a" etype="MASK"/>
+ <parameter type="__mmask16" varname="b" etype="MASK"/>
+ <description>Compute the bitwise OR of 16-bit masks "a" and "b", and store the result in "k".</description>
+ <operation>
+k[15:0] := a[15:0] OR b[15:0]
+k[MAX:16] := 0
+ </operation>
+ <instruction name="KORW" form="k, k, k" xed="KORW_MASKmskw_MASKmskw_MASKmskw_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_kxnor_mask16">
+ <type>Mask</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Mask</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__mmask16" varname="a" etype="MASK"/>
+ <parameter type="__mmask16" varname="b" etype="MASK"/>
+ <description>Compute the bitwise XNOR of 16-bit masks "a" and "b", and store the result in "k".</description>
+ <operation>
+k[15:0] := NOT (a[15:0] XOR b[15:0])
+k[MAX:16] := 0
+ </operation>
+ <instruction name="KXNORW" form="k, k, k" xed="KXNORW_MASKmskw_MASKmskw_MASKmskw_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_kxor_mask16">
+ <type>Mask</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Mask</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__mmask16" varname="a" etype="MASK"/>
+ <parameter type="__mmask16" varname="b" etype="MASK"/>
+ <description>Compute the bitwise XOR of 16-bit masks "a" and "b", and store the result in "k".</description>
+ <operation>
+k[15:0] := a[15:0] XOR b[15:0]
+k[MAX:16] := 0
+ </operation>
+ <instruction name="KXORW" form="k, k, k" xed="KXORW_MASKmskw_MASKmskw_MASKmskw_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_kshiftli_mask16">
+ <type>Mask</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Mask</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__mmask16" varname="a" etype="MASK"/>
+ <parameter type="unsigned int" varname="count" etype="IMM" immwidth="8"/>
+ <description>Shift the bits of 16-bit mask "a" left by "count" while shifting in zeros, and store the least significant 16 bits of the result in "k".</description>
+ <operation>
+k[MAX:0] := 0
+IF count[7:0] &lt;= 15
+ k[15:0] := a[15:0] &lt;&lt; count[7:0]
+FI
+ </operation>
+ <instruction name="KSHIFTLW" form="k, k, imm8" xed="KSHIFTLW_MASKmskw_MASKmskw_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_kshiftri_mask16">
+ <type>Mask</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Mask</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__mmask16" varname="a" etype="MASK"/>
+ <parameter type="unsigned int" varname="count" etype="IMM" immwidth="8"/>
+ <description>Shift the bits of 16-bit mask "a" right by "count" while shifting in zeros, and store the least significant 16 bits of the result in "k".</description>
+ <operation>
+k[MAX:0] := 0
+IF count[7:0] &lt;= 15
+ k[15:0] := a[15:0] &gt;&gt; count[7:0]
+FI
+ </operation>
+ <instruction name="KSHIFTRW" form="k, k, imm8" xed="KSHIFTRW_MASKmskw_MASKmskw_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_load_mask16">
+ <type>Mask</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Load</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__mmask16*" varname="mem_addr" etype="MASK" memwidth="16"/>
+ <description>Load 16-bit mask from memory into "k".</description>
+ <operation>
+k[15:0] := MEM[mem_addr+15:mem_addr]
+ </operation>
+ <instruction name="KMOVW" form="k, m16" xed="KMOVW_MASKmskw_MEMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_store_mask16">
+ <type>Mask</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="__mmask16*" varname="mem_addr" etype="MASK" memwidth="16"/>
+ <parameter type="__mmask16" varname="a" etype="MASK"/>
+ <description>Store 16-bit mask from "a" into memory.</description>
+ <operation>
+MEM[mem_addr+15:mem_addr] := a[15:0]
+ </operation>
+ <instruction name="KMOVW" form="m16, k" xed="KMOVW_MEMu16_MASKmskw_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_kortest_mask16_u8">
+ <type>Mask</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Mask</category>
+ <return type="unsigned char" varname="dst" etype="UI8"/>
+ <parameter type="__mmask16" varname="a" etype="MASK"/>
+ <parameter type="__mmask16" varname="b" etype="MASK"/>
+ <parameter type="unsigned char*" varname="all_ones" etype="UI8" memwidth="8"/>
+ <description>Compute the bitwise OR of 16-bit masks "a" and "b". If the result is all zeros, store 1 in "dst", otherwise store 0 in "dst". If the result is all ones, store 1 in "all_ones", otherwise store 0 in "all_ones".</description>
+ <operation>
+tmp[15:0] := a[15:0] OR b[15:0]
+IF tmp[15:0] == 0x0
+ dst := 1
+ELSE
+ dst := 0
+FI
+IF tmp[15:0] == 0xFFFF
+ MEM[all_ones+7:all_ones] := 1
+ELSE
+ MEM[all_ones+7:all_ones] := 0
+FI
+ </operation>
+ <instruction name="KORTESTW" form="k, k" xed="KORTESTW_MASKmskw_MASKmskw_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_kortestz_mask16_u8">
+ <type>Mask</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Mask</category>
+ <return type="unsigned char" varname="dst" etype="UI8"/>
+ <parameter type="__mmask16" varname="a" etype="MASK"/>
+ <parameter type="__mmask16" varname="b" etype="MASK"/>
+ <description>Compute the bitwise OR of 16-bit masks "a" and "b". If the result is all zeroes, store 1 in "dst", otherwise store 0 in "dst".</description>
+ <operation>
+tmp[15:0] := a[15:0] OR b[15:0]
+IF tmp[15:0] == 0x0
+ dst := 1
+ELSE
+ dst := 0
+FI
+ </operation>
+ <instruction name="KORTESTW" form="k, k" xed="KORTESTW_MASKmskw_MASKmskw_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_kortestc_mask16_u8">
+ <type>Mask</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Mask</category>
+ <return type="unsigned char" varname="dst" etype="UI8"/>
+ <parameter type="__mmask16" varname="a" etype="MASK"/>
+ <parameter type="__mmask16" varname="b" etype="MASK"/>
+ <description>Compute the bitwise OR of 16-bit masks "a" and "b". If the result is all ones, store 1 in "dst", otherwise store 0 in "dst".</description>
+ <operation>
+tmp[15:0] := a[15:0] OR b[15:0]
+IF tmp[15:0] == 0xFFFF
+ dst := 1
+ELSE
+ dst := 0
+FI
+ </operation>
+ <instruction name="KORTESTW" form="k, k" xed="KORTESTW_MASKmskw_MASKmskw_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_cvtmask16_u32">
+ <CPUID>AVX512F</CPUID>
+ <category>Mask</category>
+ <return type="unsigned int" varname="dst" etype="UI32"/>
+ <parameter type="__mmask16" varname="a" etype="MASK"/>
+ <description>Convert 16-bit mask "a" into an integer value, and store the result in "dst".</description>
+ <operation>
+dst := ZeroExtend32(a[15:0])
+ </operation>
+ <instruction name="KMOVW" form="r32, k" xed="KMOVW_GPR32u32_MASKmskw_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_cvtu32_mask16">
+ <CPUID>AVX512F</CPUID>
+ <category>Mask</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="unsigned int" varname="a" etype="UI16"/>
+ <description>Convert integer value "a" into a 16-bit mask, and store the result in "k".</description>
+ <operation>
+k := ZeroExtend16(a[15:0])
+ </operation>
+ <instruction name="KMOVW" form="k, r32" xed="KMOVW_MASKmskw_GPR32u32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_kandn">
+ <type>Mask</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Mask</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__mmask16" varname="a" etype="MASK"/>
+ <parameter type="__mmask16" varname="b" etype="MASK"/>
+ <description>Compute the bitwise NOT of 16-bit masks "a" and then AND with "b", and store the result in "k".</description>
+ <operation>
+k[15:0] := (NOT a[15:0]) AND b[15:0]
+k[MAX:16] := 0
+ </operation>
+ <instruction name="KANDNW" form="k, k, k" xed="KANDNW_MASKmskw_MASKmskw_MASKmskw_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_kand">
+ <type>Mask</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Mask</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__mmask16" varname="a" etype="MASK"/>
+ <parameter type="__mmask16" varname="b" etype="MASK"/>
+ <description>Compute the bitwise AND of 16-bit masks "a" and "b", and store the result in "k".</description>
+ <operation>
+k[15:0] := a[15:0] AND b[15:0]
+k[MAX:16] := 0
+ </operation>
+ <instruction name="KANDW" form="k, k, k" xed="KANDW_MASKmskw_MASKmskw_MASKmskw_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_kmov">
+ <type>Mask</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Mask</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__mmask16" varname="a" etype="MASK"/>
+ <description>Copy 16-bit mask "a" to "k".</description>
+ <operation>
+k[15:0] := a[15:0]
+k[MAX:16] := 0
+ </operation>
+ <instruction name="KMOVW" form="k, k" xed="KMOVW_MASKmskw_MASKu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_knot">
+ <type>Mask</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Mask</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__mmask16" varname="a" etype="MASK"/>
+ <description>Compute the bitwise NOT of 16-bit mask "a", and store the result in "k".</description>
+ <operation>
+k[15:0] := NOT a[15:0]
+k[MAX:16] := 0
+ </operation>
+ <instruction name="KNOTW" form="k, k" xed="KNOTW_MASKmskw_MASKmskw_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_kor">
+ <type>Mask</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Mask</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__mmask16" varname="a" etype="MASK"/>
+ <parameter type="__mmask16" varname="b" etype="MASK"/>
+ <description>Compute the bitwise OR of 16-bit masks "a" and "b", and store the result in "k".</description>
+ <operation>
+k[15:0] := a[15:0] OR b[15:0]
+k[MAX:16] := 0
+ </operation>
+ <instruction name="KORW" form="k, k, k" xed="KORW_MASKmskw_MASKmskw_MASKmskw_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_kunpackb">
+ <type>Mask</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Mask</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__mmask16" varname="a" etype="MASK"/>
+ <parameter type="__mmask16" varname="b" etype="MASK"/>
+ <description>Unpack and interleave 8 bits from masks "a" and "b", and store the 16-bit result in "k".</description>
+ <operation>
+k[7:0] := b[7:0]
+k[15:8] := a[7:0]
+k[MAX:16] := 0
+ </operation>
+ <instruction name="KUNPCKBW" form="k, k, k" xed="KUNPCKBW_MASKmskw_MASKmskw_MASKmskw_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_kxnor">
+ <type>Mask</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Mask</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__mmask16" varname="a" etype="MASK"/>
+ <parameter type="__mmask16" varname="b" etype="MASK"/>
+ <description>Compute the bitwise XNOR of 16-bit masks "a" and "b", and store the result in "k".</description>
+ <operation>
+k[15:0] := NOT (a[15:0] XOR b[15:0])
+k[MAX:16] := 0
+ </operation>
+ <instruction name="KXNORW" form="k, k, k" xed="KXNORW_MASKmskw_MASKmskw_MASKmskw_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_kxor">
+ <type>Mask</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Mask</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__mmask16" varname="a" etype="MASK"/>
+ <parameter type="__mmask16" varname="b" etype="MASK"/>
+ <description>Compute the bitwise XOR of 16-bit masks "a" and "b", and store the result in "k".</description>
+ <operation>
+k[15:0] := a[15:0] XOR b[15:0]
+k[MAX:16] := 0
+ </operation>
+ <instruction name="KXORW" form="k, k, k" xed="KXORW_MASKmskw_MASKmskw_MASKmskw_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_add_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <description>Add packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := a[i+63:i] + b[i+63:i]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VADDPD" form="zmm {z}, zmm, zmm" xed="VADDPD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_add_round_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Add packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+ [round_note]</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := a[i+63:i] + b[i+63:i]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VADDPD" form="zmm {z}, zmm, zmm {er}" xed="VADDPD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_add_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <description>Add packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := a[i+31:i] + b[i+31:i]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VADDPS" form="zmm {z}, zmm, zmm" xed="VADDPS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_add_round_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Add packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+ [round_note]</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := a[i+31:i] + b[i+31:i]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VADDPS" form="zmm {z}, zmm, zmm {er}" xed="VADDPS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_add_round_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Add the lower double-precision (64-bit) floating-point element in "a" and "b", store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst".
+ [round_note]</description>
+ <operation>
+dst[63:0] := a[63:0] + b[63:0]
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VADDSD" form="xmm, xmm, xmm {er}" xed="VADDSD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_add_round_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Add the lower double-precision (64-bit) floating-point element in "a" and "b", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst".
+ [round_note]</description>
+ <operation>
+IF k[0]
+ dst[63:0] := a[63:0] + b[63:0]
+ELSE
+ dst[63:0] := src[63:0]
+FI
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VADDSD" form="xmm {k}, xmm, xmm {er}" xed="VADDSD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_add_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Add the lower double-precision (64-bit) floating-point element in "a" and "b", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst".</description>
+ <operation>
+IF k[0]
+ dst[63:0] := a[63:0] + b[63:0]
+ELSE
+ dst[63:0] := src[63:0]
+FI
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VADDSD" form="xmm {k}, xmm, xmm" xed="VADDSD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_add_round_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Add the lower double-precision (64-bit) floating-point element in "a" and "b", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst".
+ [round_note]</description>
+ <operation>
+IF k[0]
+ dst[63:0] := a[63:0] + b[63:0]
+ELSE
+ dst[63:0] := 0
+FI
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VADDSD" form="xmm {z}, xmm, xmm {er}" xed="VADDSD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_add_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Add the lower double-precision (64-bit) floating-point element in "a" and "b", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst".</description>
+ <operation>
+IF k[0]
+ dst[63:0] := a[63:0] + b[63:0]
+ELSE
+ dst[63:0] := 0
+FI
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VADDSD" form="xmm {z}, xmm, xmm" xed="VADDSD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_add_round_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Add the lower single-precision (32-bit) floating-point element in "a" and "b", store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst".
+ [round_note]</description>
+ <operation>
+dst[31:0] := a[31:0] + b[31:0]
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VADDSS" form="xmm, xmm, xmm {er}" xed="VADDSS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_add_round_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Add the lower single-precision (32-bit) floating-point element in "a" and "b", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst".
+ [round_note]</description>
+ <operation>
+IF k[0]
+ dst[31:0] := a[31:0] + b[31:0]
+ELSE
+ dst[31:0] := src[31:0]
+FI
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VADDSS" form="xmm {k}, xmm, xmm {er}" xed="VADDSS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_add_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Add the lower single-precision (32-bit) floating-point element in "a" and "b", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst".</description>
+ <operation>
+IF k[0]
+ dst[31:0] := a[31:0] + b[31:0]
+ELSE
+ dst[31:0] := src[31:0]
+FI
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VADDSS" form="xmm {k}, xmm, xmm" xed="VADDSS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_add_round_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Add the lower single-precision (32-bit) floating-point element in "a" and "b", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst".
+ [round_note]</description>
+ <operation>
+IF k[0]
+ dst[31:0] := a[31:0] + b[31:0]
+ELSE
+ dst[31:0] := 0
+FI
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VADDSS" form="xmm {z}, xmm, xmm {er}" xed="VADDSS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_add_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Add the lower single-precision (32-bit) floating-point element in "a" and "b", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst".</description>
+ <operation>
+IF k[0]
+ dst[31:0] := a[31:0] + b[31:0]
+ELSE
+ dst[31:0] := 0
+FI
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VADDSS" form="xmm {z}, xmm, xmm" xed="VADDSS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_alignr_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="__m512i" varname="b" etype="UI32"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="4"/>
+ <description>Concatenate "a" and "b" into a 128-byte immediate result, shift the result right by "imm8" 32-bit elements, and store the low 64 bytes (16 elements) in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+temp[1023:512] := a[511:0]
+temp[511:0] := b[511:0]
+temp[1023:0] := temp[1023:0] &gt;&gt; (32*imm8[3:0])
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := temp[i+31:i]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VALIGND" form="zmm {z}, zmm, zmm, imm8" xed="VALIGND_ZMMu32_MASKmskw_ZMMu32_ZMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_alignr_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="__m512i" varname="b" etype="UI64"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="3"/>
+ <description>Concatenate "a" and "b" into a 128-byte immediate result, shift the result right by "imm8" 64-bit elements, and store the low 64 bytes (8 elements) in "dst".</description>
+ <operation>
+temp[1023:512] := a[511:0]
+temp[511:0] := b[511:0]
+temp[1023:0] := temp[1023:0] &gt;&gt; (64*imm8[2:0])
+dst[511:0] := temp[511:0]
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VALIGNQ" form="zmm, zmm, zmm, imm8" xed="VALIGNQ_ZMMu64_MASKmskw_ZMMu64_ZMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_alignr_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="__m512i" varname="b" etype="UI64"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="3"/>
+ <description>Concatenate "a" and "b" into a 128-byte immediate result, shift the result right by "imm8" 64-bit elements, and store the low 64 bytes (8 elements) in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+temp[1023:512] := a[511:0]
+temp[511:0] := b[511:0]
+temp[1023:0] := temp[1023:0] &gt;&gt; (64*imm8[2:0])
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := temp[i+63:i]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VALIGNQ" form="zmm {k}, zmm, zmm, imm8" xed="VALIGNQ_ZMMu64_MASKmskw_ZMMu64_ZMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_alignr_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="__m512i" varname="b" etype="UI64"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="3"/>
+ <description>Concatenate "a" and "b" into a 128-byte immediate result, shift the result right by "imm8" 64-bit elements, and store the low 64 bytes (8 elements) in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+temp[1023:512] := a[511:0]
+temp[511:0] := b[511:0]
+temp[1023:0] := temp[1023:0] &gt;&gt; (64*imm8[2:0])
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := temp[i+63:i]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VALIGNQ" form="zmm {z}, zmm, zmm, imm8" xed="VALIGNQ_ZMMu64_MASKmskw_ZMMu64_ZMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_broadcast_f32x4">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Broadcast the 4 packed single-precision (32-bit) floating-point elements from "a" to all elements of "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ n := (j % 4)*32
+ dst[i+31:i] := a[n+31:n]
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VBROADCASTF32X4" form="zmm, m128" xed="VBROADCASTF32X4_ZMMf32_MASKmskw_MEMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_broadcast_f32x4">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Broadcast the 4 packed single-precision (32-bit) floating-point elements from "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ n := (j % 4)*32
+ IF k[j]
+ dst[i+31:i] := a[n+31:n]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VBROADCASTF32X4" form="zmm {k}, m128" xed="VBROADCASTF32X4_ZMMf32_MASKmskw_MEMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_broadcast_f32x4">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Broadcast the 4 packed single-precision (32-bit) floating-point elements from "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ n := (j % 4)*32
+ IF k[j]
+ dst[i+31:i] := a[n+31:n]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VBROADCASTF32X4" form="zmm {z}, m128" xed="VBROADCASTF32X4_ZMMf32_MASKmskw_MEMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_broadcast_f64x4">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <description>Broadcast the 4 packed double-precision (64-bit) floating-point elements from "a" to all elements of "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ n := (j % 4)*64
+ dst[i+63:i] := a[n+63:n]
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VBROADCASTF64X4" form="zmm, m256" xed="VBROADCASTF64X4_ZMMf64_MASKmskw_MEMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_broadcast_f64x4">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <description>Broadcast the 4 packed double-precision (64-bit) floating-point elements from "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ n := (j % 4)*64
+ IF k[j]
+ dst[i+63:i] := a[n+63:n]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VBROADCASTF64X4" form="zmm {k}, m256" xed="VBROADCASTF64X4_ZMMf64_MASKmskw_MEMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_broadcast_f64x4">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <description>Broadcast the 4 packed double-precision (64-bit) floating-point elements from "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ n := (j % 4)*64
+ IF k[j]
+ dst[i+63:i] := a[n+63:n]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VBROADCASTF64X4" form="zmm {z}, m256" xed="VBROADCASTF64X4_ZMMf64_MASKmskw_MEMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_broadcast_i32x4">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <description>Broadcast the 4 packed 32-bit integers from "a" to all elements of "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ n := (j % 4)*32
+ dst[i+31:i] := a[n+31:n]
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VBROADCASTI32X4" form="zmm, m128" xed="VBROADCASTI32X4_ZMMu32_MASKmskw_MEMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_broadcast_i32x4">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="src" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <description>Broadcast the 4 packed 32-bit integers from "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ n := (j % 4)*32
+ IF k[j]
+ dst[i+31:i] := a[n+31:n]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VBROADCASTI32X4" form="zmm {k}, m128" xed="VBROADCASTI32X4_ZMMu32_MASKmskw_MEMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_broadcast_i32x4">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <description>Broadcast the 4 packed 32-bit integers from "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ n := (j % 4)*32
+ IF k[j]
+ dst[i+31:i] := a[n+31:n]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VBROADCASTI32X4" form="zmm {z}, m128" xed="VBROADCASTI32X4_ZMMu32_MASKmskw_MEMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_broadcast_i64x4">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <description>Broadcast the 4 packed 64-bit integers from "a" to all elements of "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ n := (j % 4)*64
+ dst[i+63:i] := a[n+63:n]
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VBROADCASTI64X4" form="zmm, m256" xed="VBROADCASTI64X4_ZMMu64_MASKmskw_MEMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_broadcast_i64x4">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <description>Broadcast the 4 packed 64-bit integers from "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ n := (j % 4)*64
+ IF k[j]
+ dst[i+63:i] := a[n+63:n]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VBROADCASTI64X4" form="zmm {k}, m256" xed="VBROADCASTI64X4_ZMMu64_MASKmskw_MEMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_broadcast_i64x4">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <description>Broadcast the 4 packed 64-bit integers from "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ n := (j % 4)*64
+ IF k[j]
+ dst[i+63:i] := a[n+63:n]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VBROADCASTI64X4" form="zmm {z}, m256" xed="VBROADCASTI64X4_ZMMu64_MASKmskw_MEMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_broadcastsd_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Broadcast the low double-precision (64-bit) floating-point element from "a" to all elements of "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ dst[i+63:i] := a[63:0]
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VBROADCASTSD" form="zmm, xmm" xed="VBROADCASTSD_ZMMf64_MASKmskw_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_broadcastsd_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Broadcast the low double-precision (64-bit) floating-point element from "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := a[63:0]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VBROADCASTSD" form="zmm {k}, xmm" xed="VBROADCASTSD_ZMMf64_MASKmskw_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_broadcastsd_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Broadcast the low double-precision (64-bit) floating-point element from "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := a[63:0]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VBROADCASTSD" form="zmm {z}, xmm" xed="VBROADCASTSD_ZMMf64_MASKmskw_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_broadcastss_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Broadcast the low single-precision (32-bit) floating-point element from "a" to all elements of "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := a[31:0]
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VBROADCASTSS" form="zmm, xmm" xed="VBROADCASTSS_ZMMf32_MASKmskw_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_broadcastss_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Broadcast the low single-precision (32-bit) floating-point element from "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := a[31:0]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VBROADCASTSS" form="zmm {k}, xmm" xed="VBROADCASTSS_ZMMf32_MASKmskw_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_broadcastss_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Broadcast the low single-precision (32-bit) floating-point element from "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := a[31:0]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VBROADCASTSS" form="zmm {z}, xmm" xed="VBROADCASTSS_ZMMf32_MASKmskw_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cmp_round_sd_mask">
+ <type>Floating Point</type>
+ <type>Mask</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immtype="_CMP_"/>
+ <parameter type="const int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Compare the lower double-precision (64-bit) floating-point element in "a" and "b" based on the comparison operand specified by "imm8", and store the result in mask vector "k". [sae_note]</description>
+ <operation>CASE (imm8[4:0]) OF
+0: OP := _CMP_EQ_OQ
+1: OP := _CMP_LT_OS
+2: OP := _CMP_LE_OS
+3: OP := _CMP_UNORD_Q
+4: OP := _CMP_NEQ_UQ
+5: OP := _CMP_NLT_US
+6: OP := _CMP_NLE_US
+7: OP := _CMP_ORD_Q
+8: OP := _CMP_EQ_UQ
+9: OP := _CMP_NGE_US
+10: OP := _CMP_NGT_US
+11: OP := _CMP_FALSE_OQ
+12: OP := _CMP_NEQ_OQ
+13: OP := _CMP_GE_OS
+14: OP := _CMP_GT_OS
+15: OP := _CMP_TRUE_UQ
+16: OP := _CMP_EQ_OS
+17: OP := _CMP_LT_OQ
+18: OP := _CMP_LE_OQ
+19: OP := _CMP_UNORD_S
+20: OP := _CMP_NEQ_US
+21: OP := _CMP_NLT_UQ
+22: OP := _CMP_NLE_UQ
+23: OP := _CMP_ORD_S
+24: OP := _CMP_EQ_US
+25: OP := _CMP_NGE_UQ
+26: OP := _CMP_NGT_UQ
+27: OP := _CMP_FALSE_OS
+28: OP := _CMP_NEQ_OS
+29: OP := _CMP_GE_OQ
+30: OP := _CMP_GT_OQ
+31: OP := _CMP_TRUE_US
+ESAC
+k[0] := ( a[63:0] OP b[63:0] ) ? 1 : 0
+k[MAX:1] := 0
+ </operation>
+ <instruction name="VCMPSD" form="k, xmm, xmm {sae}, imm8" xed="VCMPSD_MASKmskw_MASKmskw_XMMf64_XMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cmp_sd_mask">
+ <type>Floating Point</type>
+ <type>Mask</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="5"/>
+ <description>Compare the lower double-precision (64-bit) floating-point element in "a" and "b" based on the comparison operand specified by "imm8", and store the result in mask vector "k".</description>
+ <operation>CASE (imm8[4:0]) OF
+0: OP := _CMP_EQ_OQ
+1: OP := _CMP_LT_OS
+2: OP := _CMP_LE_OS
+3: OP := _CMP_UNORD_Q
+4: OP := _CMP_NEQ_UQ
+5: OP := _CMP_NLT_US
+6: OP := _CMP_NLE_US
+7: OP := _CMP_ORD_Q
+8: OP := _CMP_EQ_UQ
+9: OP := _CMP_NGE_US
+10: OP := _CMP_NGT_US
+11: OP := _CMP_FALSE_OQ
+12: OP := _CMP_NEQ_OQ
+13: OP := _CMP_GE_OS
+14: OP := _CMP_GT_OS
+15: OP := _CMP_TRUE_UQ
+16: OP := _CMP_EQ_OS
+17: OP := _CMP_LT_OQ
+18: OP := _CMP_LE_OQ
+19: OP := _CMP_UNORD_S
+20: OP := _CMP_NEQ_US
+21: OP := _CMP_NLT_UQ
+22: OP := _CMP_NLE_UQ
+23: OP := _CMP_ORD_S
+24: OP := _CMP_EQ_US
+25: OP := _CMP_NGE_UQ
+26: OP := _CMP_NGT_UQ
+27: OP := _CMP_FALSE_OS
+28: OP := _CMP_NEQ_OS
+29: OP := _CMP_GE_OQ
+30: OP := _CMP_GT_OQ
+31: OP := _CMP_TRUE_US
+ESAC
+k[0] := ( a[63:0] OP b[63:0] ) ? 1 : 0
+k[MAX:1] := 0
+ </operation>
+ <instruction name="VCMPSD" form="k, xmm, xmm, imm8" xed="VCMPSD_MASKmskw_MASKmskw_XMMf64_XMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cmp_round_sd_mask">
+ <type>Floating Point</type>
+ <type>Mask</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__mmask8" varname="k1" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immtype="_CMP_"/>
+ <parameter type="const int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Compare the lower double-precision (64-bit) floating-point element in "a" and "b" based on the comparison operand specified by "imm8", and store the result in mask vector "k" using zeromask "k1" (the element is zeroed out when mask bit 0 is not set). [sae_note]</description>
+ <operation>CASE (imm8[4:0]) OF
+0: OP := _CMP_EQ_OQ
+1: OP := _CMP_LT_OS
+2: OP := _CMP_LE_OS
+3: OP := _CMP_UNORD_Q
+4: OP := _CMP_NEQ_UQ
+5: OP := _CMP_NLT_US
+6: OP := _CMP_NLE_US
+7: OP := _CMP_ORD_Q
+8: OP := _CMP_EQ_UQ
+9: OP := _CMP_NGE_US
+10: OP := _CMP_NGT_US
+11: OP := _CMP_FALSE_OQ
+12: OP := _CMP_NEQ_OQ
+13: OP := _CMP_GE_OS
+14: OP := _CMP_GT_OS
+15: OP := _CMP_TRUE_UQ
+16: OP := _CMP_EQ_OS
+17: OP := _CMP_LT_OQ
+18: OP := _CMP_LE_OQ
+19: OP := _CMP_UNORD_S
+20: OP := _CMP_NEQ_US
+21: OP := _CMP_NLT_UQ
+22: OP := _CMP_NLE_UQ
+23: OP := _CMP_ORD_S
+24: OP := _CMP_EQ_US
+25: OP := _CMP_NGE_UQ
+26: OP := _CMP_NGT_UQ
+27: OP := _CMP_FALSE_OS
+28: OP := _CMP_NEQ_OS
+29: OP := _CMP_GE_OQ
+30: OP := _CMP_GT_OQ
+31: OP := _CMP_TRUE_US
+ESAC
+IF k1[0]
+ k[0] := ( a[63:0] OP b[63:0] ) ? 1 : 0
+ELSE
+ k[0] := 0
+FI
+k[MAX:1] := 0
+ </operation>
+ <instruction name="VCMPSD" form="k {k}, xmm, xmm {sae}, imm8" xed="VCMPSD_MASKmskw_MASKmskw_XMMf64_XMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cmp_sd_mask">
+ <type>Floating Point</type>
+ <type>Mask</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__mmask8" varname="k1" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="5"/>
+ <description>Compare the lower double-precision (64-bit) floating-point element in "a" and "b" based on the comparison operand specified by "imm8", and store the result in mask vector "k" using zeromask "k1" (the element is zeroed out when mask bit 0 is not set).</description>
+ <operation>CASE (imm8[4:0]) OF
+0: OP := _CMP_EQ_OQ
+1: OP := _CMP_LT_OS
+2: OP := _CMP_LE_OS
+3: OP := _CMP_UNORD_Q
+4: OP := _CMP_NEQ_UQ
+5: OP := _CMP_NLT_US
+6: OP := _CMP_NLE_US
+7: OP := _CMP_ORD_Q
+8: OP := _CMP_EQ_UQ
+9: OP := _CMP_NGE_US
+10: OP := _CMP_NGT_US
+11: OP := _CMP_FALSE_OQ
+12: OP := _CMP_NEQ_OQ
+13: OP := _CMP_GE_OS
+14: OP := _CMP_GT_OS
+15: OP := _CMP_TRUE_UQ
+16: OP := _CMP_EQ_OS
+17: OP := _CMP_LT_OQ
+18: OP := _CMP_LE_OQ
+19: OP := _CMP_UNORD_S
+20: OP := _CMP_NEQ_US
+21: OP := _CMP_NLT_UQ
+22: OP := _CMP_NLE_UQ
+23: OP := _CMP_ORD_S
+24: OP := _CMP_EQ_US
+25: OP := _CMP_NGE_UQ
+26: OP := _CMP_NGT_UQ
+27: OP := _CMP_FALSE_OS
+28: OP := _CMP_NEQ_OS
+29: OP := _CMP_GE_OQ
+30: OP := _CMP_GT_OQ
+31: OP := _CMP_TRUE_US
+ESAC
+IF k1[0]
+ k[0] := ( a[63:0] OP b[63:0] ) ? 1 : 0
+ELSE
+ k[0] := 0
+FI
+k[MAX:1] := 0
+ </operation>
+ <instruction name="VCMPSD" form="k {k}, xmm, xmm, imm8" xed="VCMPSD_MASKmskw_MASKmskw_XMMf64_XMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cmp_round_ss_mask">
+ <type>Floating Point</type>
+ <type>Mask</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immtype="_CMP_"/>
+ <parameter type="const int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Compare the lower single-precision (32-bit) floating-point element in "a" and "b" based on the comparison operand specified by "imm8", and store the result in mask vector "k". [sae_note]</description>
+ <operation>CASE (imm8[4:0]) OF
+0: OP := _CMP_EQ_OQ
+1: OP := _CMP_LT_OS
+2: OP := _CMP_LE_OS
+3: OP := _CMP_UNORD_Q
+4: OP := _CMP_NEQ_UQ
+5: OP := _CMP_NLT_US
+6: OP := _CMP_NLE_US
+7: OP := _CMP_ORD_Q
+8: OP := _CMP_EQ_UQ
+9: OP := _CMP_NGE_US
+10: OP := _CMP_NGT_US
+11: OP := _CMP_FALSE_OQ
+12: OP := _CMP_NEQ_OQ
+13: OP := _CMP_GE_OS
+14: OP := _CMP_GT_OS
+15: OP := _CMP_TRUE_UQ
+16: OP := _CMP_EQ_OS
+17: OP := _CMP_LT_OQ
+18: OP := _CMP_LE_OQ
+19: OP := _CMP_UNORD_S
+20: OP := _CMP_NEQ_US
+21: OP := _CMP_NLT_UQ
+22: OP := _CMP_NLE_UQ
+23: OP := _CMP_ORD_S
+24: OP := _CMP_EQ_US
+25: OP := _CMP_NGE_UQ
+26: OP := _CMP_NGT_UQ
+27: OP := _CMP_FALSE_OS
+28: OP := _CMP_NEQ_OS
+29: OP := _CMP_GE_OQ
+30: OP := _CMP_GT_OQ
+31: OP := _CMP_TRUE_US
+ESAC
+k[0] := ( a[31:0] OP b[31:0] ) ? 1 : 0
+k[MAX:1] := 0
+ </operation>
+ <instruction name="VCMPSS" form="k, xmm, xmm {sae}, imm8" xed="VCMPSS_MASKmskw_MASKmskw_XMMf32_XMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cmp_ss_mask">
+ <type>Floating Point</type>
+ <type>Mask</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="5"/>
+ <description>Compare the lower single-precision (32-bit) floating-point element in "a" and "b" based on the comparison operand specified by "imm8", and store the result in mask vector "k".</description>
+ <operation>CASE (imm8[4:0]) OF
+0: OP := _CMP_EQ_OQ
+1: OP := _CMP_LT_OS
+2: OP := _CMP_LE_OS
+3: OP := _CMP_UNORD_Q
+4: OP := _CMP_NEQ_UQ
+5: OP := _CMP_NLT_US
+6: OP := _CMP_NLE_US
+7: OP := _CMP_ORD_Q
+8: OP := _CMP_EQ_UQ
+9: OP := _CMP_NGE_US
+10: OP := _CMP_NGT_US
+11: OP := _CMP_FALSE_OQ
+12: OP := _CMP_NEQ_OQ
+13: OP := _CMP_GE_OS
+14: OP := _CMP_GT_OS
+15: OP := _CMP_TRUE_UQ
+16: OP := _CMP_EQ_OS
+17: OP := _CMP_LT_OQ
+18: OP := _CMP_LE_OQ
+19: OP := _CMP_UNORD_S
+20: OP := _CMP_NEQ_US
+21: OP := _CMP_NLT_UQ
+22: OP := _CMP_NLE_UQ
+23: OP := _CMP_ORD_S
+24: OP := _CMP_EQ_US
+25: OP := _CMP_NGE_UQ
+26: OP := _CMP_NGT_UQ
+27: OP := _CMP_FALSE_OS
+28: OP := _CMP_NEQ_OS
+29: OP := _CMP_GE_OQ
+30: OP := _CMP_GT_OQ
+31: OP := _CMP_TRUE_US
+ESAC
+k[0] := ( a[31:0] OP b[31:0] ) ? 1 : 0
+k[MAX:1] := 0
+ </operation>
+ <instruction name="VCMPSS" form="k, xmm, xmm, imm8" xed="VCMPSS_MASKmskw_MASKmskw_XMMf32_XMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cmp_round_ss_mask">
+ <type>Floating Point</type>
+ <type>Mask</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__mmask8" varname="k1" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immtype="_CMP_"/>
+ <parameter type="const int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Compare the lower single-precision (32-bit) floating-point element in "a" and "b" based on the comparison operand specified by "imm8", and store the result in mask vector "k" using zeromask "k1" (the element is zeroed out when mask bit 0 is not set). [sae_note]</description>
+ <operation>CASE (imm8[4:0]) OF
+0: OP := _CMP_EQ_OQ
+1: OP := _CMP_LT_OS
+2: OP := _CMP_LE_OS
+3: OP := _CMP_UNORD_Q
+4: OP := _CMP_NEQ_UQ
+5: OP := _CMP_NLT_US
+6: OP := _CMP_NLE_US
+7: OP := _CMP_ORD_Q
+8: OP := _CMP_EQ_UQ
+9: OP := _CMP_NGE_US
+10: OP := _CMP_NGT_US
+11: OP := _CMP_FALSE_OQ
+12: OP := _CMP_NEQ_OQ
+13: OP := _CMP_GE_OS
+14: OP := _CMP_GT_OS
+15: OP := _CMP_TRUE_UQ
+16: OP := _CMP_EQ_OS
+17: OP := _CMP_LT_OQ
+18: OP := _CMP_LE_OQ
+19: OP := _CMP_UNORD_S
+20: OP := _CMP_NEQ_US
+21: OP := _CMP_NLT_UQ
+22: OP := _CMP_NLE_UQ
+23: OP := _CMP_ORD_S
+24: OP := _CMP_EQ_US
+25: OP := _CMP_NGE_UQ
+26: OP := _CMP_NGT_UQ
+27: OP := _CMP_FALSE_OS
+28: OP := _CMP_NEQ_OS
+29: OP := _CMP_GE_OQ
+30: OP := _CMP_GT_OQ
+31: OP := _CMP_TRUE_US
+ESAC
+IF k1[0]
+ k[0] := ( a[31:0] OP b[31:0] ) ? 1 : 0
+ELSE
+ k[0] := 0
+FI
+k[MAX:1] := 0
+ </operation>
+ <instruction name="VCMPSS" form="k {k}, xmm, xmm {sae}, imm8" xed="VCMPSS_MASKmskw_MASKmskw_XMMf32_XMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cmp_ss_mask">
+ <type>Floating Point</type>
+ <type>Mask</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__mmask8" varname="k1" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="5"/>
+ <description>Compare the lower single-precision (32-bit) floating-point element in "a" and "b" based on the comparison operand specified by "imm8", and store the result in mask vector "k" using zeromask "k1" (the element is zeroed out when mask bit 0 is not set).</description>
+ <operation>CASE (imm8[4:0]) OF
+0: OP := _CMP_EQ_OQ
+1: OP := _CMP_LT_OS
+2: OP := _CMP_LE_OS
+3: OP := _CMP_UNORD_Q
+4: OP := _CMP_NEQ_UQ
+5: OP := _CMP_NLT_US
+6: OP := _CMP_NLE_US
+7: OP := _CMP_ORD_Q
+8: OP := _CMP_EQ_UQ
+9: OP := _CMP_NGE_US
+10: OP := _CMP_NGT_US
+11: OP := _CMP_FALSE_OQ
+12: OP := _CMP_NEQ_OQ
+13: OP := _CMP_GE_OS
+14: OP := _CMP_GT_OS
+15: OP := _CMP_TRUE_UQ
+16: OP := _CMP_EQ_OS
+17: OP := _CMP_LT_OQ
+18: OP := _CMP_LE_OQ
+19: OP := _CMP_UNORD_S
+20: OP := _CMP_NEQ_US
+21: OP := _CMP_NLT_UQ
+22: OP := _CMP_NLE_UQ
+23: OP := _CMP_ORD_S
+24: OP := _CMP_EQ_US
+25: OP := _CMP_NGE_UQ
+26: OP := _CMP_NGT_UQ
+27: OP := _CMP_FALSE_OS
+28: OP := _CMP_NEQ_OS
+29: OP := _CMP_GE_OQ
+30: OP := _CMP_GT_OQ
+31: OP := _CMP_TRUE_US
+ESAC
+IF k1[0]
+ k[0] := ( a[31:0] OP b[31:0] ) ? 1 : 0
+ELSE
+ k[0] := 0
+FI
+k[MAX:1] := 0
+ </operation>
+ <instruction name="VCMPSS" form="k {k}, xmm, xmm, imm8" xed="VCMPSS_MASKmskw_MASKmskw_XMMf32_XMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_comi_round_sd">
+ <type>Floating Point</type>
+ <type>Flag</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="int" varname="k" etype="UI32"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immtype="_CMP_"/>
+ <parameter type="const int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Compare the lower double-precision (64-bit) floating-point element in "a" and "b" based on the comparison operand specified by "imm8", and return the boolean result (0 or 1). [sae_note]</description>
+ <operation>CASE (imm8[4:0]) OF
+0: OP := _CMP_EQ_OQ
+1: OP := _CMP_LT_OS
+2: OP := _CMP_LE_OS
+3: OP := _CMP_UNORD_Q
+4: OP := _CMP_NEQ_UQ
+5: OP := _CMP_NLT_US
+6: OP := _CMP_NLE_US
+7: OP := _CMP_ORD_Q
+8: OP := _CMP_EQ_UQ
+9: OP := _CMP_NGE_US
+10: OP := _CMP_NGT_US
+11: OP := _CMP_FALSE_OQ
+12: OP := _CMP_NEQ_OQ
+13: OP := _CMP_GE_OS
+14: OP := _CMP_GT_OS
+15: OP := _CMP_TRUE_UQ
+16: OP := _CMP_EQ_OS
+17: OP := _CMP_LT_OQ
+18: OP := _CMP_LE_OQ
+19: OP := _CMP_UNORD_S
+20: OP := _CMP_NEQ_US
+21: OP := _CMP_NLT_UQ
+22: OP := _CMP_NLE_UQ
+23: OP := _CMP_ORD_S
+24: OP := _CMP_EQ_US
+25: OP := _CMP_NGE_UQ
+26: OP := _CMP_NGT_UQ
+27: OP := _CMP_FALSE_OS
+28: OP := _CMP_NEQ_OS
+29: OP := _CMP_GE_OQ
+30: OP := _CMP_GT_OQ
+31: OP := _CMP_TRUE_US
+ESAC
+RETURN ( a[63:0] OP b[63:0] ) ? 1 : 0
+ </operation>
+ <instruction name="VCOMISD" form="xmm, xmm {sae}" xed="VCOMISD_XMMf64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_comi_round_ss">
+ <type>Floating Point</type>
+ <type>Flag</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="int" varname="k" etype="UI32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immtype="_CMP_"/>
+ <parameter type="const int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Compare the lower single-precision (32-bit) floating-point element in "a" and "b" based on the comparison operand specified by "imm8", and return the boolean result (0 or 1). [sae_note]</description>
+ <operation>CASE (imm8[4:0]) OF
+0: OP := _CMP_EQ_OQ
+1: OP := _CMP_LT_OS
+2: OP := _CMP_LE_OS
+3: OP := _CMP_UNORD_Q
+4: OP := _CMP_NEQ_UQ
+5: OP := _CMP_NLT_US
+6: OP := _CMP_NLE_US
+7: OP := _CMP_ORD_Q
+8: OP := _CMP_EQ_UQ
+9: OP := _CMP_NGE_US
+10: OP := _CMP_NGT_US
+11: OP := _CMP_FALSE_OQ
+12: OP := _CMP_NEQ_OQ
+13: OP := _CMP_GE_OS
+14: OP := _CMP_GT_OS
+15: OP := _CMP_TRUE_UQ
+16: OP := _CMP_EQ_OS
+17: OP := _CMP_LT_OQ
+18: OP := _CMP_LE_OQ
+19: OP := _CMP_UNORD_S
+20: OP := _CMP_NEQ_US
+21: OP := _CMP_NLT_UQ
+22: OP := _CMP_NLE_UQ
+23: OP := _CMP_ORD_S
+24: OP := _CMP_EQ_US
+25: OP := _CMP_NGE_UQ
+26: OP := _CMP_NGT_UQ
+27: OP := _CMP_FALSE_OS
+28: OP := _CMP_NEQ_OS
+29: OP := _CMP_GE_OQ
+30: OP := _CMP_GT_OQ
+31: OP := _CMP_TRUE_US
+ESAC
+RETURN ( a[31:0] OP b[31:0] ) ? 1 : 0
+ </operation>
+ <instruction name="VCOMISS" form="xmm, xmm {sae}" xed="VCOMISS_XMMf32_XMMf32_AVX512"/>
+ <instruction name="VUCOMISS" form="xmm, xmm {sae}" xed="VUCOMISS_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_compress_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Contiguously store the active double-precision (64-bit) floating-point elements in "a" (those with their respective bit set in writemask "k") to "dst", and pass through the remaining elements from "src".</description>
+ <operation>
+size := 64
+m := 0
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[m+size-1:m] := a[i+63:i]
+ m := m + size
+ FI
+ENDFOR
+dst[511:m] := src[511:m]
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCOMPRESSPD" form="zmm {k}, zmm" xed="VCOMPRESSPD_ZMMf64_MASKmskw_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_compressstoreu_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Store</category>
+ <category>Swizzle</category>
+ <return type="void"/>
+ <parameter type="void*" varname="base_addr" etype="FP64" memwidth="512"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Contiguously store the active double-precision (64-bit) floating-point elements in "a" (those with their respective bit set in writemask "k") to unaligned memory at "base_addr".</description>
+ <operation>
+size := 64
+m := base_addr
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ MEM[m+size-1:m] := a[i+63:i]
+ m := m + size
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VCOMPRESSPD" form="m512 {k}, zmm" xed="VCOMPRESSPD_MEMf64_MASKmskw_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_compress_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Contiguously store the active double-precision (64-bit) floating-point elements in "a" (those with their respective bit set in zeromask "k") to "dst", and set the remaining elements to zero.</description>
+ <operation>
+size := 64
+m := 0
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[m+size-1:m] := a[i+63:i]
+ m := m + size
+ FI
+ENDFOR
+dst[511:m] := 0
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCOMPRESSPD" form="zmm {z}, zmm" xed="VCOMPRESSPD_ZMMf64_MASKmskw_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_compress_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Contiguously store the active single-precision (32-bit) floating-point elements in "a" (those with their respective bit set in writemask "k") to "dst", and pass through the remaining elements from "src".</description>
+ <operation>
+size := 32
+m := 0
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[m+size-1:m] := a[i+31:i]
+ m := m + size
+ FI
+ENDFOR
+dst[511:m] := src[511:m]
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCOMPRESSPS" form="zmm {k}, zmm" xed="VCOMPRESSPS_ZMMf32_MASKmskw_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_compressstoreu_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Store</category>
+ <category>Swizzle</category>
+ <return type="void"/>
+ <parameter type="void*" varname="base_addr" etype="FP32" memwidth="512"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Contiguously store the active single-precision (32-bit) floating-point elements in "a" (those with their respective bit set in writemask "k") to unaligned memory at "base_addr".</description>
+ <operation>
+size := 32
+m := base_addr
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ MEM[m+size-1:m] := a[i+31:i]
+ m := m + size
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VCOMPRESSPS" form="m512 {k}, zmm" xed="VCOMPRESSPS_MEMf32_MASKmskw_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_compress_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Contiguously store the active single-precision (32-bit) floating-point elements in "a" (those with their respective bit set in zeromask "k") to "dst", and set the remaining elements to zero.</description>
+ <operation>
+size := 32
+m := 0
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[m+size-1:m] := a[i+31:i]
+ m := m + size
+ FI
+ENDFOR
+dst[511:m] := 0
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCOMPRESSPS" form="zmm {z}, zmm" xed="VCOMPRESSPS_ZMMf32_MASKmskw_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_cvtepi32_pd">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m256i" varname="a" etype="SI32"/>
+ <description>Convert packed signed 32-bit integers in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ m := j*64
+ dst[m+63:m] := Convert_Int32_To_FP64(a[i+31:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCVTDQ2PD" form="zmm, ymm" xed="VCVTDQ2PD_ZMMf64_MASKmskw_YMMi32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cvtepi32_pd">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI32"/>
+ <description>Convert packed signed 32-bit integers in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ m := j*64
+ IF k[j]
+ dst[m+63:m] := Convert_Int32_To_FP64(a[i+31:i])
+ ELSE
+ dst[m+63:m] := src[m+63:m]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCVTDQ2PD" form="zmm {k}, ymm" xed="VCVTDQ2PD_ZMMf64_MASKmskw_YMMi32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_cvtepi32_pd">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI32"/>
+ <description>Convert packed signed 32-bit integers in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ m := j*64
+ IF k[j]
+ dst[m+63:m] := Convert_Int32_To_FP64(a[i+31:i])
+ ELSE
+ dst[m+63:m] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCVTDQ2PD" form="zmm {z}, ymm" xed="VCVTDQ2PD_ZMMf64_MASKmskw_YMMi32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_cvt_roundepi32_ps">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512i" varname="a" etype="SI32"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Convert packed signed 32-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst".
+ [round_note]</description>
+ <operation>
+FOR j := 0 to 15
+ i := 32*j
+ dst[i+31:i] := Convert_Int32_To_FP32(a[i+31:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCVTDQ2PS" form="zmm, zmm {er}" xed="VCVTDQ2PS_ZMMf32_MASKmskw_ZMMi32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_cvtepi32_ps">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512i" varname="a" etype="SI32"/>
+ <description>Convert packed signed 32-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := 32*j
+ dst[i+31:i] := Convert_Int32_To_FP32(a[i+31:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCVTDQ2PS" form="zmm, zmm" xed="VCVTDQ2PS_ZMMf32_MASKmskw_ZMMi32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cvt_roundepi32_ps">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI32"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Convert packed signed 32-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+ [round_note]</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := Convert_Int32_To_FP32(a[i+31:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCVTDQ2PS" form="zmm {k}, zmm {er}" xed="VCVTDQ2PS_ZMMf32_MASKmskw_ZMMi32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cvtepi32_ps">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI32"/>
+ <description>Convert packed signed 32-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := Convert_Int32_To_FP32(a[i+31:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCVTDQ2PS" form="zmm {k}, zmm" xed="VCVTDQ2PS_ZMMf32_MASKmskw_ZMMi32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_cvt_roundepi32_ps">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI32"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Convert packed signed 32-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+ [round_note]</description>
+ <operation>
+FOR j := 0 to 15
+ i := 32*j
+ IF k[j]
+ dst[i+31:i] := Convert_Int32_To_FP32(a[i+31:i])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCVTDQ2PS" form="zmm {z}, zmm {er}" xed="VCVTDQ2PS_ZMMf32_MASKmskw_ZMMi32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_cvtepi32_ps">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI32"/>
+ <description>Convert packed signed 32-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := 32*j
+ IF k[j]
+ dst[i+31:i] := Convert_Int32_To_FP32(a[i+31:i])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCVTDQ2PS" form="zmm {z}, zmm" xed="VCVTDQ2PS_ZMMf32_MASKmskw_ZMMi32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_cvt_roundpd_epi32">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Convert packed double-precision (64-bit) floating-point elements in "a" to packed 32-bit integers, and store the results in "dst".
+ [round_note]</description>
+ <operation>
+FOR j := 0 to 7
+ i := 32*j
+ k := 64*j
+ dst[i+31:i] := Convert_FP64_To_Int32(a[k+63:k])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VCVTPD2DQ" form="ymm, zmm {er}" xed="VCVTPD2DQ_YMMi32_MASKmskw_ZMMf64_AVX512_VL512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_cvtpd_epi32">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Convert packed double-precision (64-bit) floating-point elements in "a" to packed 32-bit integers, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := 32*j
+ k := 64*j
+ dst[i+31:i] := Convert_FP64_To_Int32(a[k+63:k])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VCVTPD2DQ" form="ymm, zmm" xed="VCVTPD2DQ_YMMi32_MASKmskw_ZMMf64_AVX512_VL512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cvt_roundpd_epi32">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Convert packed double-precision (64-bit) floating-point elements in "a" to packed 32-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+ [round_note]</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ l := j*64
+ IF k[j]
+ dst[i+31:i] := Convert_FP64_To_Int32(a[l+63:l])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VCVTPD2DQ" form="ymm {k}, zmm {er}" xed="VCVTPD2DQ_YMMi32_MASKmskw_ZMMf64_AVX512_VL512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cvtpd_epi32">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Convert packed double-precision (64-bit) floating-point elements in "a" to packed 32-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ l := j*64
+ IF k[j]
+ dst[i+31:i] := Convert_FP64_To_Int32(a[l+63:l])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VCVTPD2DQ" form="ymm {k}, zmm" xed="VCVTPD2DQ_YMMi32_MASKmskw_ZMMf64_AVX512_VL512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_cvt_roundpd_epi32">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Convert packed double-precision (64-bit) floating-point elements in "a" to packed 32-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+ [round_note]</description>
+ <operation>
+FOR j := 0 to 7
+ i := 32*j
+ l := 64*j
+ IF k[j]
+ dst[i+31:i] := Convert_FP64_To_Int32(a[l+63:l])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VCVTPD2DQ" form="ymm {z}, zmm {er}" xed="VCVTPD2DQ_YMMi32_MASKmskw_ZMMf64_AVX512_VL512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_cvtpd_epi32">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Convert packed double-precision (64-bit) floating-point elements in "a" to packed 32-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := 32*j
+ l := 64*j
+ IF k[j]
+ dst[i+31:i] := Convert_FP64_To_Int32(a[l+63:l])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VCVTPD2DQ" form="ymm {z}, zmm" xed="VCVTPD2DQ_YMMi32_MASKmskw_ZMMf64_AVX512_VL512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_cvt_roundpd_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Convert packed double-precision (64-bit) floating-point elements in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst".
+ [round_note]</description>
+ <operation>
+FOR j := 0 to 7
+ i := 32*j
+ k := 64*j
+ dst[i+31:i] := Convert_FP64_To_FP32(a[k+63:k])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VCVTPD2PS" form="ymm, zmm {er}" xed="VCVTPD2PS_YMMf32_MASKmskw_ZMMf64_AVX512_VL512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_cvtpd_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Convert packed double-precision (64-bit) floating-point elements in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := 32*j
+ k := 64*j
+ dst[i+31:i] := Convert_FP64_To_FP32(a[k+63:k])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VCVTPD2PS" form="ymm, zmm" xed="VCVTPD2PS_YMMf32_MASKmskw_ZMMf64_AVX512_VL512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cvt_roundpd_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Convert packed double-precision (64-bit) floating-point elements in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+ [round_note]</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ l := j*64
+ IF k[j]
+ dst[i+31:i] := Convert_FP64_To_FP32(a[l+63:l])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VCVTPD2PS" form="ymm {k}, zmm {er}" xed="VCVTPD2PS_YMMf32_MASKmskw_ZMMf64_AVX512_VL512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cvtpd_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Convert packed double-precision (64-bit) floating-point elements in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := 32*j
+ l := 64*j
+ IF k[j]
+ dst[i+31:i] := Convert_FP64_To_FP32(a[l+63:l])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VCVTPD2PS" form="ymm {k}, zmm" xed="VCVTPD2PS_YMMf32_MASKmskw_ZMMf64_AVX512_VL512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_cvt_roundpd_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Convert packed double-precision (64-bit) floating-point elements in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+ [round_note]</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ l := j*64
+ IF k[j]
+ dst[i+31:i] := Convert_FP64_To_FP32(a[l+63:l])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VCVTPD2PS" form="ymm {z}, zmm {er}" xed="VCVTPD2PS_YMMf32_MASKmskw_ZMMf64_AVX512_VL512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_cvtpd_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Convert packed double-precision (64-bit) floating-point elements in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ l := j*64
+ IF k[j]
+ dst[i+31:i] := Convert_FP64_To_FP32(a[l+63:l])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VCVTPD2PS" form="ymm {z}, zmm" xed="VCVTPD2PS_YMMf32_MASKmskw_ZMMf64_AVX512_VL512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_cvt_roundpd_epu32">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 32-bit integers, and store the results in "dst".
+ [round_note]</description>
+ <operation>
+FOR j := 0 to 7
+ i := 32*j
+ k := 64*j
+ dst[i+31:i] := Convert_FP64_To_UInt32(a[k+63:k])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VCVTPD2UDQ" form="ymm, zmm {er}" xed="VCVTPD2UDQ_YMMu32_MASKmskw_ZMMf64_AVX512_VL512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_cvtpd_epu32">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 32-bit integers, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := 32*j
+ k := 64*j
+ dst[i+31:i] := Convert_FP64_To_UInt32(a[k+63:k])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VCVTPD2UDQ" form="ymm, zmm" xed="VCVTPD2UDQ_YMMu32_MASKmskw_ZMMf64_AVX512_VL512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cvt_roundpd_epu32">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 32-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+ [round_note]</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ l := j*64
+ IF k[j]
+ dst[i+31:i] := Convert_FP64_To_UInt32(a[l+63:l])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VCVTPD2UDQ" form="ymm {k}, zmm {er}" xed="VCVTPD2UDQ_YMMu32_MASKmskw_ZMMf64_AVX512_VL512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cvtpd_epu32">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 32-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ l := j*64
+ IF k[j]
+ dst[i+31:i] := Convert_FP64_To_UInt32(a[l+63:l])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VCVTPD2UDQ" form="ymm {k}, zmm" xed="VCVTPD2UDQ_YMMu32_MASKmskw_ZMMf64_AVX512_VL512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_cvt_roundpd_epu32">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 32-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+ [round_note]</description>
+ <operation>
+FOR j := 0 to 7
+ i := 32*j
+ l := 64*j
+ IF k[j]
+ dst[i+31:i] := Convert_FP64_To_UInt32(a[l+63:l])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VCVTPD2UDQ" form="ymm {z}, zmm {er}" xed="VCVTPD2UDQ_YMMu32_MASKmskw_ZMMf64_AVX512_VL512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_cvtpd_epu32">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 32-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := 32*j
+ l := 64*j
+ IF k[j]
+ dst[i+31:i] := Convert_FP64_To_UInt32(a[l+63:l])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VCVTPD2UDQ" form="ymm {z}, zmm" xed="VCVTPD2UDQ_YMMu32_MASKmskw_ZMMf64_AVX512_VL512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_cvt_roundph_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m256i" varname="a" etype="FP16"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Convert packed half-precision (16-bit) floating-point elements in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst". [sae_note]</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ m := j*16
+ dst[i+31:i] := Convert_FP16_To_FP32(a[m+15:m])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCVTPH2PS" form="zmm, ymm {sae}" xed="VCVTPH2PS_ZMMf32_MASKmskw_YMMf16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_cvtph_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m256i" varname="a" etype="FP16"/>
+ <description>Convert packed half-precision (16-bit) floating-point elements in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ m := j*16
+ dst[i+31:i] := Convert_FP16_To_FP32(a[m+15:m])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCVTPH2PS" form="zmm, ymm" xed="VCVTPH2PS_ZMMf32_MASKmskw_YMMf16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cvt_roundph_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="FP16"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Convert packed half-precision (16-bit) floating-point elements in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). [sae_note]</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ m := j*16
+ IF k[j]
+ dst[i+31:i] := Convert_FP16_To_FP32(a[m+15:m])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCVTPH2PS" form="zmm {k}, ymm {sae}" xed="VCVTPH2PS_ZMMf32_MASKmskw_YMMf16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cvtph_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="FP16"/>
+ <description>Convert packed half-precision (16-bit) floating-point elements in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ m := j*16
+ IF k[j]
+ dst[i+31:i] := Convert_FP16_To_FP32(a[m+15:m])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCVTPH2PS" form="zmm {k}, ymm" xed="VCVTPH2PS_ZMMf32_MASKmskw_YMMf16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_cvt_roundph_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="FP16"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Convert packed half-precision (16-bit) floating-point elements in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). [sae_note]</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ m := j*16
+ IF k[j]
+ dst[i+31:i] := Convert_FP16_To_FP32(a[m+15:m])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCVTPH2PS" form="zmm {z}, ymm {sae}" xed="VCVTPH2PS_ZMMf32_MASKmskw_YMMf16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_cvtph_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="FP16"/>
+ <description>Convert packed half-precision (16-bit) floating-point elements in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ m := j*16
+ IF k[j]
+ dst[i+31:i] := Convert_FP16_To_FP32(a[m+15:m])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCVTPH2PS" form="zmm {z}, ymm" xed="VCVTPH2PS_ZMMf32_MASKmskw_YMMf16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_cvt_roundps_epi32">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed 32-bit integers, and store the results in "dst".
+ [round_note]</description>
+ <operation>
+FOR j := 0 to 15
+ i := 32*j
+ dst[i+31:i] := Convert_FP32_To_Int32(a[i+31:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCVTPS2DQ" form="zmm, zmm {er}" xed="VCVTPS2DQ_ZMMi32_MASKmskw_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_cvtps_epi32">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed 32-bit integers, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := 32*j
+ dst[i+31:i] := Convert_FP32_To_Int32(a[i+31:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCVTPS2DQ" form="zmm, zmm" xed="VCVTPS2DQ_ZMMi32_MASKmskw_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cvt_roundps_epi32">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="src" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed 32-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+ [round_note]</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := Convert_FP32_To_Int32(a[i+31:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCVTPS2DQ" form="zmm {k}, zmm {er}" xed="VCVTPS2DQ_ZMMi32_MASKmskw_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cvtps_epi32">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="src" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed 32-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := Convert_FP32_To_Int32(a[i+31:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCVTPS2DQ" form="zmm {k}, zmm" xed="VCVTPS2DQ_ZMMi32_MASKmskw_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_cvt_roundps_epi32">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed 32-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+ [round_note]</description>
+ <operation>
+FOR j := 0 to 15
+ i := 32*j
+ IF k[j]
+ dst[i+31:i] := Convert_FP32_To_Int32(a[i+31:i])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCVTPS2DQ" form="zmm {z}, zmm {er}" xed="VCVTPS2DQ_ZMMi32_MASKmskw_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_cvtps_epi32">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed 32-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := 32*j
+ IF k[j]
+ dst[i+31:i] := Convert_FP32_To_Int32(a[i+31:i])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCVTPS2DQ" form="zmm {z}, zmm" xed="VCVTPS2DQ_ZMMi32_MASKmskw_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_cvt_roundps_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst". [sae_note]</description>
+ <operation>
+FOR j := 0 to 7
+ i := 64*j
+ k := 32*j
+ dst[i+63:i] := Convert_FP32_To_FP64(a[k+31:k])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCVTPS2PD" form="zmm, ymm {sae}" xed="VCVTPS2PD_ZMMf64_MASKmskw_YMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_cvtps_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := 64*j
+ k := 32*j
+ dst[i+63:i] := Convert_FP32_To_FP64(a[k+31:k])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCVTPS2PD" form="zmm, ymm" xed="VCVTPS2PD_ZMMf64_MASKmskw_YMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cvt_roundps_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). [sae_note]</description>
+ <operation>
+FOR j := 0 to 7
+ i := 64*j
+ l := 32*j
+ IF k[j]
+ dst[i+63:i] := Convert_FP32_To_FP64(a[l+31:l])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCVTPS2PD" form="zmm {k}, ymm {sae}" xed="VCVTPS2PD_ZMMf64_MASKmskw_YMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cvtps_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := 64*j
+ l := 32*j
+ IF k[j]
+ dst[i+63:i] := Convert_FP32_To_FP64(a[l+31:l])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCVTPS2PD" form="zmm {k}, ymm" xed="VCVTPS2PD_ZMMf64_MASKmskw_YMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_cvt_roundps_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). [sae_note]</description>
+ <operation>
+FOR j := 0 to 7
+ i := 64*j
+ l := 32*j
+ IF k[j]
+ dst[i+63:i] := Convert_FP32_To_FP64(a[l+31:l])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCVTPS2PD" form="zmm {z}, ymm {sae}" xed="VCVTPS2PD_ZMMf64_MASKmskw_YMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_cvtps_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := 64*j
+ l := 32*j
+ IF k[j]
+ dst[i+63:i] := Convert_FP32_To_FP64(a[l+31:l])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCVTPS2PD" form="zmm {z}, ymm" xed="VCVTPS2PD_ZMMf64_MASKmskw_YMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_cvt_roundps_ph">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst". [sae_note]</description>
+ <operation>
+FOR j := 0 to 15
+ i := 16*j
+ l := 32*j
+ dst[i+15:i] := Convert_FP32_To_FP16(a[l+31:l])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VCVTPS2PH" form="ymm, zmm {sae}" xed="VCVTPS2PH_YMMf16_MASKmskw_ZMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_cvtps_ph">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst". [sae_note]</description>
+ <operation>
+FOR j := 0 to 15
+ i := 16*j
+ l := 32*j
+ dst[i+15:i] := Convert_FP32_To_FP16(a[l+31:l])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VCVTPS2PH" form="ymm, zmm {sae}" xed="VCVTPS2PH_YMMf16_MASKmskw_ZMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cvt_roundps_ph">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__m256i" varname="src" etype="UI16"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). [sae_note]</description>
+ <operation>
+FOR j := 0 to 15
+ i := 16*j
+ l := 32*j
+ IF k[j]
+ dst[i+15:i] := Convert_FP32_To_FP16(a[l+31:l])
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VCVTPS2PH" form="ymm {k}, zmm {sae}" xed="VCVTPS2PH_YMMf16_MASKmskw_ZMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cvtps_ph">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__m256i" varname="src" etype="UI16"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). [sae_note]</description>
+ <operation>
+FOR j := 0 to 15
+ i := 16*j
+ l := 32*j
+ IF k[j]
+ dst[i+15:i] := Convert_FP32_To_FP16(a[l+31:l])
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VCVTPS2PH" form="ymm {k}, zmm {sae}" xed="VCVTPS2PH_YMMf16_MASKmskw_ZMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_cvt_roundps_ph">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). [sae_note]</description>
+ <operation>
+FOR j := 0 to 15
+ i := 16*j
+ l := 32*j
+ IF k[j]
+ dst[i+15:i] := Convert_FP32_To_FP16(a[l+31:l])
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VCVTPS2PH" form="ymm {z}, zmm {sae}" xed="VCVTPS2PH_YMMf16_MASKmskw_ZMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_cvtps_ph">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). [sae_note]</description>
+ <operation>
+FOR j := 0 to 15
+ i := 16*j
+ l := 32*j
+ IF k[j]
+ dst[i+15:i] := Convert_FP32_To_FP16(a[l+31:l])
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VCVTPS2PH" form="ymm {z}, zmm {sae}" xed="VCVTPS2PH_YMMf16_MASKmskw_ZMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_cvt_roundps_epu32">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 32-bit integers, and store the results in "dst".
+ [round_note]</description>
+ <operation>
+FOR j := 0 to 15
+ i := 32*j
+ dst[i+31:i] := Convert_FP32_To_UInt32(a[i+31:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCVTPS2UDQ" form="zmm, zmm {er}" xed="VCVTPS2UDQ_ZMMu32_MASKmskw_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_cvtps_epu32">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 32-bit integers, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := 32*j
+ dst[i+31:i] := Convert_FP32_To_UInt32(a[i+31:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCVTPS2UDQ" form="zmm, zmm" xed="VCVTPS2UDQ_ZMMu32_MASKmskw_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cvt_roundps_epu32">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="src" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 32-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+ [round_note]</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := Convert_FP32_To_UInt32(a[i+31:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCVTPS2UDQ" form="zmm {k}, zmm {er}" xed="VCVTPS2UDQ_ZMMu32_MASKmskw_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cvtps_epu32">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="src" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 32-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := 32*j
+ IF k[j]
+ dst[i+31:i] := Convert_FP32_To_UInt32(a[i+31:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCVTPS2UDQ" form="zmm {k}, zmm" xed="VCVTPS2UDQ_ZMMu32_MASKmskw_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_cvt_roundps_epu32">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 32-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+ [round_note]</description>
+ <operation>
+FOR j := 0 to 15
+ i := 32*j
+ IF k[j]
+ dst[i+31:i] := Convert_FP32_To_UInt32(a[i+31:i])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCVTPS2UDQ" form="zmm {z}, zmm {er}" xed="VCVTPS2UDQ_ZMMu32_MASKmskw_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_cvtps_epu32">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 32-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := 32*j
+ IF k[j]
+ dst[i+31:i] := Convert_FP32_To_UInt32(a[i+31:i])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCVTPS2UDQ" form="zmm {z}, zmm" xed="VCVTPS2UDQ_ZMMu32_MASKmskw_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cvt_roundsd_i32">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="int" varname="dst" etype="UI32"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Convert the lower double-precision (64-bit) floating-point element in "a" to a 32-bit integer, and store the result in "dst".
+ [round_note]</description>
+ <operation>
+dst[31:0] := Convert_FP64_To_Int32(a[63:0])
+ </operation>
+ <instruction name="VCVTSD2SI" form="r32, xmm {er}" xed="VCVTSD2SI_GPR32i32_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cvt_roundsd_i64">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__int64" varname="dst" etype="UI64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Convert the lower double-precision (64-bit) floating-point element in "a" to a 64-bit integer, and store the result in "dst".
+ [round_note]</description>
+ <operation>
+dst[63:0] := Convert_FP64_To_Int64(a[63:0])
+ </operation>
+ <instruction name="VCVTSD2SI" form="r64, xmm {er}" xed="VCVTSD2SI_GPR64i64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cvt_roundsd_si32">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="int" varname="dst" etype="UI32"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Convert the lower double-precision (64-bit) floating-point element in "a" to a 32-bit integer, and store the result in "dst".
+ [round_note]</description>
+ <operation>
+dst[31:0] := Convert_FP64_To_Int32(a[63:0])
+ </operation>
+ <instruction name="VCVTSD2SI" form="r32, xmm {er}" xed="VCVTSD2SI_GPR32i32_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cvt_roundsd_si64">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__int64" varname="dst" etype="UI64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Convert the lower double-precision (64-bit) floating-point element in "a" to a 64-bit integer, and store the result in "dst".
+ [round_note]</description>
+ <operation>
+dst[63:0] := Convert_FP64_To_Int64(a[63:0])
+ </operation>
+ <instruction name="VCVTSD2SI" form="r64, xmm {er}" xed="VCVTSD2SI_GPR64i64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cvtsd_i32">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="int" varname="dst" etype="UI32"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Convert the lower double-precision (64-bit) floating-point element in "a" to a 32-bit integer, and store the result in "dst".</description>
+ <operation>
+dst[31:0] := Convert_FP64_To_Int32(a[63:0])
+ </operation>
+ <instruction name="VCVTSD2SI" form="r32, xmm" xed="VCVTSD2SI_GPR32i32_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cvtsd_i64">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__int64" varname="dst" etype="UI64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Convert the lower double-precision (64-bit) floating-point element in "a" to a 64-bit integer, and store the result in "dst".</description>
+ <operation>
+dst[63:0] := Convert_FP64_To_Int64(a[63:0])
+ </operation>
+ <instruction name="VCVTSD2SI" form="r64, xmm" xed="VCVTSD2SI_GPR64i64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cvt_roundsd_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Convert the lower double-precision (64-bit) floating-point element in "b" to a single-precision (32-bit) floating-point element, store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst".
+ [round_note]</description>
+ <operation>
+dst[31:0] := Convert_FP64_To_FP32(b[63:0])
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VCVTSD2SS" form="xmm, xmm, xmm {er}" xed="VCVTSD2SS_XMMf32_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cvt_roundsd_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Convert the lower double-precision (64-bit) floating-point element in "b" to a single-precision (32-bit) floating-point element, store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst".
+ [round_note]</description>
+ <operation>
+IF k[0]
+ dst[31:0] := Convert_FP64_To_FP32(b[63:0])
+ELSE
+ dst[31:0] := src[31:0]
+FI
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VCVTSD2SS" form="xmm {k}, xmm, xmm {er}" xed="VCVTSD2SS_XMMf32_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cvtsd_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Convert the lower double-precision (64-bit) floating-point element in "b" to a single-precision (32-bit) floating-point element, store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst".</description>
+ <operation>
+IF k[0]
+ dst[31:0] := Convert_FP64_To_FP32(b[63:0])
+ELSE
+ dst[31:0] := src[31:0]
+FI
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VCVTSD2SS" form="xmm {k}, xmm, xmm" xed="VCVTSD2SS_XMMf32_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_cvt_roundsd_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Convert the lower double-precision (64-bit) floating-point element in "b" to a single-precision (32-bit) floating-point element, store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst".
+ [round_note]</description>
+ <operation>
+IF k[0]
+ dst[31:0] := Convert_FP64_To_FP32(b[63:0])
+ELSE
+ dst[31:0] := 0
+FI
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VCVTSD2SS" form="xmm {z}, xmm, xmm {er}" xed="VCVTSD2SS_XMMf32_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_cvtsd_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Convert the lower double-precision (64-bit) floating-point element in "b" to a single-precision (32-bit) floating-point element, store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst".</description>
+ <operation>
+IF k[0]
+ dst[31:0] := Convert_FP64_To_FP32(b[63:0])
+ELSE
+ dst[31:0] := 0
+FI
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VCVTSD2SS" form="xmm {z}, xmm, xmm" xed="VCVTSD2SS_XMMf32_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cvt_roundsd_u32">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="unsigned int" varname="dst" etype="UI32"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Convert the lower double-precision (64-bit) floating-point element in "a" to an unsigned 32-bit integer, and store the result in "dst".
+ [round_note]</description>
+ <operation>
+dst[31:0] := Convert_FP64_To_UInt32(a[63:0])
+ </operation>
+ <instruction name="VCVTSD2USI" form="r32, xmm {er}" xed="VCVTSD2USI_GPR32u32_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cvt_roundsd_u64">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="unsigned __int64" varname="dst" etype="UI64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Convert the lower double-precision (64-bit) floating-point element in "a" to an unsigned 64-bit integer, and store the result in "dst".
+ [round_note]</description>
+ <operation>
+dst[63:0] := Convert_FP64_To_UInt64(a[63:0])
+ </operation>
+ <instruction name="VCVTSD2USI" form="r64, xmm {er}" xed="VCVTSD2USI_GPR64u64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cvtsd_u32">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="unsigned int" varname="dst" etype="UI32"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Convert the lower double-precision (64-bit) floating-point element in "a" to an unsigned 32-bit integer, and store the result in "dst".</description>
+ <operation>
+dst[31:0] := Convert_FP64_To_UInt32(a[63:0])
+ </operation>
+ <instruction name="VCVTSD2USI" form="r32, xmm" xed="VCVTSD2USI_GPR32u32_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cvtsd_u64">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="unsigned __int64" varname="dst" etype="UI64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Convert the lower double-precision (64-bit) floating-point element in "a" to an unsigned 64-bit integer, and store the result in "dst".</description>
+ <operation>
+dst[63:0] := Convert_FP64_To_UInt64(a[63:0])
+ </operation>
+ <instruction name="VCVTSD2USI" form="r64, xmm" xed="VCVTSD2USI_GPR64u64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cvt_roundi64_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__int64" varname="b" etype="SI64"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Convert the signed 64-bit integer "b" to a double-precision (64-bit) floating-point element, store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst".
+ [round_note]</description>
+ <operation>
+dst[63:0] := Convert_Int64_To_FP64(b[63:0])
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VCVTSI2SD" form="xmm, xmm, r64 {er}" xed="VCVTSI2SD_XMMf64_XMMf64_GPR64i64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cvt_roundsi64_sd">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__int64" varname="b" etype="SI64"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Convert the signed 64-bit integer "b" to a double-precision (64-bit) floating-point element, store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst".
+ [round_note]</description>
+ <operation>
+dst[63:0] := Convert_Int64_To_FP64(b[63:0])
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VCVTSI2SD" form="xmm, xmm, r64 {er}" xed="VCVTSI2SD_XMMf64_XMMf64_GPR64i64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cvti32_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="int" varname="b" etype="SI32"/>
+ <description>Convert the signed 32-bit integer "b" to a double-precision (64-bit) floating-point element, store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst".</description>
+ <operation>
+dst[63:0] := Convert_Int32_To_FP64(b[31:0])
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VCVTSI2SD" form="xmm, xmm, r32" xed="VCVTSI2SD_XMMf64_XMMf64_GPR32i32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cvti64_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__int64" varname="b" etype="SI64"/>
+ <description>Convert the signed 64-bit integer "b" to a double-precision (64-bit) floating-point element, store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst".</description>
+ <operation>
+dst[63:0] := Convert_Int64_To_FP64(b[63:0])
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VCVTSI2SD" form="xmm, xmm, r64" xed="VCVTSI2SD_XMMf64_XMMf64_GPR64i64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cvt_roundi32_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="int" varname="b" etype="SI32"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Convert the signed 32-bit integer "b" to a single-precision (32-bit) floating-point element, store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst".
+ [round_note]</description>
+ <operation>
+dst[31:0] := Convert_Int32_To_FP32(b[31:0])
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VCVTSI2SS" form="xmm, xmm, r32 {er}" xed="VCVTSI2SS_XMMf32_XMMf32_GPR32i32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cvt_roundi64_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__int64" varname="b" etype="SI64"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Convert the signed 64-bit integer "b" to a single-precision (32-bit) floating-point element, store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst".
+ [round_note]</description>
+ <operation>
+dst[31:0] := Convert_Int64_To_FP32(b[63:0])
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VCVTSI2SS" form="xmm, xmm, r64 {er}" xed="VCVTSI2SS_XMMf32_XMMf32_GPR64i64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cvt_roundsi32_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="int" varname="b" etype="SI32"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Convert the signed 32-bit integer "b" to a single-precision (32-bit) floating-point element, store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst".
+ [round_note]</description>
+ <operation>
+dst[31:0] := Convert_Int32_To_FP32(b[31:0])
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VCVTSI2SS" form="xmm, xmm, r32 {er}" xed="VCVTSI2SS_XMMf32_XMMf32_GPR32i32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cvt_roundsi64_ss">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__int64" varname="b" etype="SI64"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Convert the signed 64-bit integer "b" to a single-precision (32-bit) floating-point element, store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst".
+ [round_note]</description>
+ <operation>
+dst[31:0] := Convert_Int64_To_FP32(b[63:0])
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VCVTSI2SS" form="xmm, xmm, r64 {er}" xed="VCVTSI2SS_XMMf32_XMMf32_GPR64i64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cvti32_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="int" varname="b" etype="SI32"/>
+ <description>Convert the signed 32-bit integer "b" to a single-precision (32-bit) floating-point element, store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst".</description>
+ <operation>
+dst[31:0] := Convert_Int32_To_FP32(b[31:0])
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VCVTSI2SS" form="xmm, xmm, r32" xed="VCVTSI2SS_XMMf32_XMMf32_GPR32i32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cvti64_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__int64" varname="b" etype="SI64"/>
+ <description>Convert the signed 64-bit integer "b" to a single-precision (32-bit) floating-point element, store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst".</description>
+ <operation>
+dst[31:0] := Convert_Int64_To_FP32(b[63:0])
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VCVTSI2SS" form="xmm, xmm, r64" xed="VCVTSI2SS_XMMf32_XMMf32_GPR64i64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cvt_roundss_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Convert the lower single-precision (32-bit) floating-point element in "b" to a double-precision (64-bit) floating-point element, store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst".
+ [sae_note]</description>
+ <operation>
+dst[63:0] := Convert_FP32_To_FP64(b[31:0])
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VCVTSS2SD" form="xmm, xmm, xmm {sae}" xed="VCVTSS2SD_XMMf64_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cvt_roundss_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Convert the lower single-precision (32-bit) floating-point element in "b" to a double-precision (64-bit) floating-point element, store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst".
+ [sae_note]</description>
+ <operation>
+IF k[0]
+ dst[63:0] := Convert_FP32_To_FP64(b[31:0])
+ELSE
+ dst[63:0] := src[63:0]
+FI
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VCVTSS2SD" form="xmm {k}, xmm, xmm {sae}" xed="VCVTSS2SD_XMMf64_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cvtss_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Convert the lower single-precision (32-bit) floating-point element in "b" to a double-precision (64-bit) floating-point element, store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst".</description>
+ <operation>
+IF k[0]
+ dst[63:0] := Convert_FP32_To_FP64(b[31:0])
+ELSE
+ dst[63:0] := src[63:0]
+FI
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VCVTSS2SD" form="xmm {k}, xmm, xmm" xed="VCVTSS2SD_XMMf64_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_cvt_roundss_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Convert the lower single-precision (32-bit) floating-point element in "b" to a double-precision (64-bit) floating-point element, store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst".
+ [sae_note]</description>
+ <operation>
+IF k[0]
+ dst[63:0] := Convert_FP32_To_FP64(b[31:0])
+ELSE
+ dst[63:0] := 0
+FI
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VCVTSS2SD" form="xmm {z}, xmm, xmm {sae}" xed="VCVTSS2SD_XMMf64_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_cvtss_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Convert the lower single-precision (32-bit) floating-point element in "b" to a double-precision (64-bit) floating-point element, store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst".</description>
+ <operation>
+IF k[0]
+ dst[63:0] := Convert_FP32_To_FP64(b[31:0])
+ELSE
+ dst[63:0] := 0
+FI
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VCVTSS2SD" form="xmm {z}, xmm, xmm" xed="VCVTSS2SD_XMMf64_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cvt_roundss_i32">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="int" varname="dst" etype="UI32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Convert the lower single-precision (32-bit) floating-point element in "a" to a 32-bit integer, and store the result in "dst".
+ [round_note]</description>
+ <operation>
+dst[31:0] := Convert_FP32_To_Int32(a[31:0])
+ </operation>
+ <instruction name="VCVTSS2SI" form="r32, xmm {er}" xed="VCVTSS2SI_GPR32i32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cvt_roundss_i64">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__int64" varname="dst" etype="UI64"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Convert the lower single-precision (32-bit) floating-point element in "a" to a 64-bit integer, and store the result in "dst".
+ [round_note]</description>
+ <operation>
+dst[63:0] := Convert_FP32_To_Int64(a[31:0])
+ </operation>
+ <instruction name="VCVTSS2SI" form="r64, xmm {er}" xed="VCVTSS2SI_GPR64i64_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cvt_roundss_si32">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="int" varname="dst" etype="UI32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Convert the lower single-precision (32-bit) floating-point element in "a" to a 32-bit integer, and store the result in "dst".
+ [round_note]</description>
+ <operation>
+dst[31:0] := Convert_FP32_To_Int32(a[31:0])
+ </operation>
+ <instruction name="VCVTSS2SI" form="r32, xmm {er}" xed="VCVTSS2SI_GPR32i32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cvt_roundss_si64">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__int64" varname="dst" etype="UI64"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Convert the lower single-precision (32-bit) floating-point element in "a" to a 64-bit integer, and store the result in "dst".
+ [round_note]</description>
+ <operation>
+dst[63:0] := Convert_FP32_To_Int64(a[31:0])
+ </operation>
+ <instruction name="VCVTSS2SI" form="r64, xmm {er}" xed="VCVTSS2SI_GPR64i64_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cvtss_i32">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="int" varname="dst" etype="UI32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Convert the lower single-precision (32-bit) floating-point element in "a" to a 32-bit integer, and store the result in "dst".</description>
+ <operation>
+dst[31:0] := Convert_FP32_To_Int32(a[31:0])
+ </operation>
+ <instruction name="VCVTSS2SI" form="r32, xmm" xed="VCVTSS2SI_GPR32i32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cvtss_i64">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__int64" varname="dst" etype="UI64"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Convert the lower single-precision (32-bit) floating-point element in "a" to a 64-bit integer, and store the result in "dst".</description>
+ <operation>
+dst[63:0] := Convert_FP32_To_Int64(a[31:0])
+ </operation>
+ <instruction name="VCVTSS2SI" form="r64, xmm" xed="VCVTSS2SI_GPR64i64_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cvt_roundss_u32">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="unsigned int" varname="dst" etype="UI32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Convert the lower single-precision (32-bit) floating-point element in "a" to an unsigned 32-bit integer, and store the result in "dst".
+ [round_note]</description>
+ <operation>
+dst[31:0] := Convert_FP32_To_UInt32(a[31:0])
+ </operation>
+ <instruction name="VCVTSS2USI" form="r32, xmm {er}" xed="VCVTSS2USI_GPR32u32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cvt_roundss_u64">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="unsigned __int64" varname="dst" etype="UI64"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Convert the lower single-precision (32-bit) floating-point element in "a" to an unsigned 64-bit integer, and store the result in "dst".
+ [round_note]</description>
+ <operation>
+dst[63:0] := Convert_FP32_To_UInt64(a[31:0])
+ </operation>
+ <instruction name="VCVTSS2USI" form="r64, xmm {er}" xed="VCVTSS2USI_GPR64u64_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cvtss_u32">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="unsigned int" varname="dst" etype="UI32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Convert the lower single-precision (32-bit) floating-point element in "a" to an unsigned 32-bit integer, and store the result in "dst".</description>
+ <operation>
+dst[31:0] := Convert_FP32_To_UInt32(a[31:0])
+ </operation>
+ <instruction name="VCVTSS2USI" form="r32, xmm" xed="VCVTSS2USI_GPR32u32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cvtss_u64">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="unsigned __int64" varname="dst" etype="UI64"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Convert the lower single-precision (32-bit) floating-point element in "a" to an unsigned 64-bit integer, and store the result in "dst".</description>
+ <operation>
+dst[63:0] := Convert_FP32_To_UInt64(a[31:0])
+ </operation>
+ <instruction name="VCVTSS2USI" form="r64, xmm" xed="VCVTSS2USI_GPR64u64_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_cvtt_roundpd_epi32">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Convert packed double-precision (64-bit) floating-point elements in "a" to packed 32-bit integers with truncation, and store the results in "dst". [sae_note]</description>
+ <operation>
+FOR j := 0 to 7
+ i := 32*j
+ k := 64*j
+ dst[i+31:i] := Convert_FP64_To_Int32_Truncate(a[k+63:k])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VCVTTPD2DQ" form="ymm, zmm {sae}" xed="VCVTTPD2DQ_YMMi32_MASKmskw_ZMMf64_AVX512_VL512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_cvttpd_epi32">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Convert packed double-precision (64-bit) floating-point elements in "a" to packed 32-bit integers with truncation, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := 32*j
+ k := 64*j
+ dst[i+31:i] := Convert_FP64_To_Int32_Truncate(a[k+63:k])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VCVTTPD2DQ" form="ymm, zmm" xed="VCVTTPD2DQ_YMMi32_MASKmskw_ZMMf64_AVX512_VL512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cvtt_roundpd_epi32">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Convert packed double-precision (64-bit) floating-point elements in "a" to packed 32-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). [sae_note]</description>
+ <operation>
+FOR j := 0 to 7
+ i := 32*j
+ l := 64*j
+ IF k[j]
+ dst[i+31:i] := Convert_FP64_To_Int32_Truncate(a[l+63:l])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VCVTTPD2DQ" form="ymm {k}, zmm {sae}" xed="VCVTTPD2DQ_YMMi32_MASKmskw_ZMMf64_AVX512_VL512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cvttpd_epi32">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Convert packed double-precision (64-bit) floating-point elements in "a" to packed 32-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := 32*j
+ l := 64*j
+ IF k[j]
+ dst[i+31:i] := Convert_FP64_To_Int32_Truncate(a[l+63:l])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VCVTTPD2DQ" form="ymm {k}, zmm" xed="VCVTTPD2DQ_YMMi32_MASKmskw_ZMMf64_AVX512_VL512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_cvtt_roundpd_epi32">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Convert packed double-precision (64-bit) floating-point elements in "a" to packed 32-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). [sae_note]</description>
+ <operation>
+FOR j := 0 to 7
+ i := 32*j
+ l := 64*j
+ IF k[j]
+ dst[i+31:i] := Convert_FP64_To_Int32_Truncate(a[l+63:l])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VCVTTPD2DQ" form="ymm {z}, zmm {sae}" xed="VCVTTPD2DQ_YMMi32_MASKmskw_ZMMf64_AVX512_VL512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_cvttpd_epi32">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Convert packed double-precision (64-bit) floating-point elements in "a" to packed 32-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := 32*j
+ l := 64*j
+ IF k[j]
+ dst[i+31:i] := Convert_FP64_To_Int32_Truncate(a[l+63:l])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VCVTTPD2DQ" form="ymm {z}, zmm" xed="VCVTTPD2DQ_YMMi32_MASKmskw_ZMMf64_AVX512_VL512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_cvtt_roundpd_epu32">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 32-bit integers with truncation, and store the results in "dst". [sae_note]</description>
+ <operation>
+FOR j := 0 to 7
+ i := 32*j
+ k := 64*j
+ dst[i+31:i] := Convert_FP64_To_UInt32_Truncate(a[k+63:k])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VCVTTPD2UDQ" form="ymm, zmm {sae}" xed="VCVTTPD2UDQ_YMMu32_MASKmskw_ZMMf64_AVX512_VL512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_cvttpd_epu32">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 32-bit integers with truncation, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := 32*j
+ k := 64*j
+ dst[i+31:i] := Convert_FP64_To_UInt32_Truncate(a[k+63:k])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VCVTTPD2UDQ" form="ymm, zmm" xed="VCVTTPD2UDQ_YMMu32_MASKmskw_ZMMf64_AVX512_VL512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cvtt_roundpd_epu32">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 32-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). [sae_note]</description>
+ <operation>
+FOR j := 0 to 7
+ i := 32*j
+ l := 64*j
+ IF k[j]
+ dst[i+31:i] := Convert_FP64_To_UInt32_Truncate(a[l+63:l])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VCVTTPD2UDQ" form="ymm {k}, zmm {sae}" xed="VCVTTPD2UDQ_YMMu32_MASKmskw_ZMMf64_AVX512_VL512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cvttpd_epu32">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 32-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := 32*j
+ l := 64*j
+ IF k[j]
+ dst[i+31:i] := Convert_FP64_To_UInt32_Truncate(a[l+63:l])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VCVTTPD2UDQ" form="ymm {k}, zmm" xed="VCVTTPD2UDQ_YMMu32_MASKmskw_ZMMf64_AVX512_VL512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_cvtt_roundpd_epu32">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 32-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). [sae_note]</description>
+ <operation>
+FOR j := 0 to 7
+ i := 32*j
+ l := 64*j
+ IF k[j]
+ dst[i+31:i] := Convert_FP64_To_UInt32_Truncate(a[l+63:l])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VCVTTPD2UDQ" form="ymm {z}, zmm {sae}" xed="VCVTTPD2UDQ_YMMu32_MASKmskw_ZMMf64_AVX512_VL512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_cvttpd_epu32">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Convert packed double-precision (64-bit) floating-point elements in "a" to packed unsigned 32-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := 32*j
+ l := 64*j
+ IF k[j]
+ dst[i+31:i] := Convert_FP64_To_UInt32_Truncate(a[l+63:l])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VCVTTPD2UDQ" form="ymm {z}, zmm" xed="VCVTTPD2UDQ_YMMu32_MASKmskw_ZMMf64_AVX512_VL512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_cvtt_roundps_epi32">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed 32-bit integers with truncation, and store the results in "dst". [sae_note]</description>
+ <operation>
+FOR j := 0 to 15
+ i := 32*j
+ dst[i+31:i] := Convert_FP32_To_Int32_Truncate(a[i+31:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCVTTPS2DQ" form="zmm, zmm {sae}" xed="VCVTTPS2DQ_ZMMi32_MASKmskw_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_cvttps_epi32">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed 32-bit integers with truncation, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := 32*j
+ dst[i+31:i] := Convert_FP32_To_Int32_Truncate(a[i+31:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCVTTPS2DQ" form="zmm, zmm" xed="VCVTTPS2DQ_ZMMi32_MASKmskw_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cvtt_roundps_epi32">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="src" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed 32-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). [sae_note]</description>
+ <operation>
+FOR j := 0 to 15
+ i := 32*j
+ IF k[j]
+ dst[i+31:i] := Convert_FP32_To_Int32_Truncate(a[i+31:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCVTTPS2DQ" form="zmm {k}, zmm {sae}" xed="VCVTTPS2DQ_ZMMi32_MASKmskw_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cvttps_epi32">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="src" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed 32-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := 32*j
+ IF k[j]
+ dst[i+31:i] := Convert_FP32_To_Int32_Truncate(a[i+31:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCVTTPS2DQ" form="zmm {k}, zmm" xed="VCVTTPS2DQ_ZMMi32_MASKmskw_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_cvtt_roundps_epi32">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed 32-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). [sae_note]</description>
+ <operation>
+FOR j := 0 to 15
+ i := 32*j
+ IF k[j]
+ dst[i+31:i] := Convert_FP32_To_Int32_Truncate(a[i+31:i])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCVTTPS2DQ" form="zmm {z}, zmm {sae}" xed="VCVTTPS2DQ_ZMMi32_MASKmskw_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_cvttps_epi32">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed 32-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := 32*j
+ IF k[j]
+ dst[i+31:i] := Convert_FP32_To_Int32_Truncate(a[i+31:i])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCVTTPS2DQ" form="zmm {z}, zmm" xed="VCVTTPS2DQ_ZMMi32_MASKmskw_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_cvtt_roundps_epu32">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 32-bit integers with truncation, and store the results in "dst". [sae_note]</description>
+ <operation>
+FOR j := 0 to 15
+ i := 32*j
+ dst[i+31:i] := Convert_FP32_To_UInt32_Truncate(a[i+31:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCVTTPS2UDQ" form="zmm, zmm {sae}" xed="VCVTTPS2UDQ_ZMMu32_MASKmskw_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_cvttps_epu32">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 32-bit integers with truncation, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := 32*j
+ dst[i+31:i] := Convert_FP32_To_UInt32_Truncate(a[i+31:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCVTTPS2UDQ" form="zmm, zmm" xed="VCVTTPS2UDQ_ZMMu32_MASKmskw_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cvtt_roundps_epu32">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="src" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 32-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). [sae_note]</description>
+ <operation>
+FOR j := 0 to 15
+ i := 32*j
+ IF k[j]
+ dst[i+31:i] := Convert_FP32_To_UInt32_Truncate(a[i+31:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCVTTPS2UDQ" form="zmm {k}, zmm {sae}" xed="VCVTTPS2UDQ_ZMMu32_MASKmskw_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cvttps_epu32">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="src" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 32-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := 32*j
+ IF k[j]
+    dst[i+31:i] := Convert_FP32_To_UInt32_Truncate(a[i+31:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCVTTPS2UDQ" form="zmm {k}, zmm" xed="VCVTTPS2UDQ_ZMMu32_MASKmskw_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_cvtt_roundps_epu32">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 32-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). [sae_note]</description>
+ <operation>
+FOR j := 0 to 15
+ i := 32*j
+ IF k[j]
+ dst[i+31:i] := Convert_FP32_To_UInt32_Truncate(a[i+31:i])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCVTTPS2UDQ" form="zmm {z}, zmm {sae}" xed="VCVTTPS2UDQ_ZMMu32_MASKmskw_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_cvttps_epu32">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed unsigned 32-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := 32*j
+ IF k[j]
+    dst[i+31:i] := Convert_FP32_To_UInt32_Truncate(a[i+31:i])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCVTTPS2UDQ" form="zmm {z}, zmm" xed="VCVTTPS2UDQ_ZMMu32_MASKmskw_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cvtt_roundsd_i32">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="int" varname="dst" etype="UI32"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Convert the lower double-precision (64-bit) floating-point element in "a" to a 32-bit integer with truncation, and store the result in "dst".
+ [sae_note]</description>
+ <operation>
+dst[31:0] := Convert_FP64_To_Int32_Truncate(a[63:0])
+ </operation>
+ <instruction name="VCVTTSD2SI" form="r32, xmm {sae}" xed="VCVTTSD2SI_GPR32i32_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cvtt_roundsd_i64">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__int64" varname="dst" etype="UI64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Convert the lower double-precision (64-bit) floating-point element in "a" to a 64-bit integer with truncation, and store the result in "dst".
+ [sae_note]</description>
+ <operation>
+dst[63:0] := Convert_FP64_To_Int64_Truncate(a[63:0])
+ </operation>
+ <instruction name="VCVTTSD2SI" form="r64, xmm {sae}" xed="VCVTTSD2SI_GPR64i64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cvtt_roundsd_si32">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="int" varname="dst" etype="UI32"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Convert the lower double-precision (64-bit) floating-point element in "a" to a 32-bit integer with truncation, and store the result in "dst".
+ [sae_note]</description>
+ <operation>
+dst[31:0] := Convert_FP64_To_Int32_Truncate(a[63:0])
+ </operation>
+ <instruction name="VCVTTSD2SI" form="r32, xmm {sae}" xed="VCVTTSD2SI_GPR32i32_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cvtt_roundsd_si64">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__int64" varname="dst" etype="UI64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Convert the lower double-precision (64-bit) floating-point element in "a" to a 64-bit integer with truncation, and store the result in "dst".
+ [sae_note]</description>
+ <operation>
+dst[63:0] := Convert_FP64_To_Int64_Truncate(a[63:0])
+ </operation>
+ <instruction name="VCVTTSD2SI" form="r64, xmm {sae}" xed="VCVTTSD2SI_GPR64i64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cvttsd_i32">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="int" varname="dst" etype="UI32"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Convert the lower double-precision (64-bit) floating-point element in "a" to a 32-bit integer with truncation, and store the result in "dst".</description>
+ <operation>
+dst[31:0] := Convert_FP64_To_Int32_Truncate(a[63:0])
+ </operation>
+ <instruction name="VCVTTSD2SI" form="r32, xmm" xed="VCVTTSD2SI_GPR32i32_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cvttsd_i64">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__int64" varname="dst" etype="UI64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Convert the lower double-precision (64-bit) floating-point element in "a" to a 64-bit integer with truncation, and store the result in "dst".</description>
+ <operation>
+dst[63:0] := Convert_FP64_To_Int64_Truncate(a[63:0])
+ </operation>
+ <instruction name="VCVTTSD2SI" form="r64, xmm" xed="VCVTTSD2SI_GPR64i64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cvtt_roundsd_u32">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="unsigned int" varname="dst" etype="UI32"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Convert the lower double-precision (64-bit) floating-point element in "a" to an unsigned 32-bit integer with truncation, and store the result in "dst".
+ [sae_note]</description>
+ <operation>
+dst[31:0] := Convert_FP64_To_UInt32_Truncate(a[63:0])
+ </operation>
+ <instruction name="VCVTTSD2USI" form="r32, xmm {sae}" xed="VCVTTSD2USI_GPR32u32_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cvtt_roundsd_u64">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="unsigned __int64" varname="dst" etype="UI64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Convert the lower double-precision (64-bit) floating-point element in "a" to an unsigned 64-bit integer with truncation, and store the result in "dst".
+ [sae_note]</description>
+ <operation>
+dst[63:0] := Convert_FP64_To_UInt64_Truncate(a[63:0])
+ </operation>
+ <instruction name="VCVTTSD2USI" form="r64, xmm {sae}" xed="VCVTTSD2USI_GPR64u64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cvttsd_u32">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="unsigned int" varname="dst" etype="UI32"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Convert the lower double-precision (64-bit) floating-point element in "a" to an unsigned 32-bit integer with truncation, and store the result in "dst".</description>
+ <operation>
+dst[31:0] := Convert_FP64_To_UInt32_Truncate(a[63:0])
+ </operation>
+ <instruction name="VCVTTSD2USI" form="r32, xmm" xed="VCVTTSD2USI_GPR32u32_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cvttsd_u64">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="unsigned __int64" varname="dst" etype="UI64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Convert the lower double-precision (64-bit) floating-point element in "a" to an unsigned 64-bit integer with truncation, and store the result in "dst".</description>
+ <operation>
+dst[63:0] := Convert_FP64_To_UInt64_Truncate(a[63:0])
+ </operation>
+ <instruction name="VCVTTSD2USI" form="r64, xmm" xed="VCVTTSD2USI_GPR64u64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cvtt_roundss_i32">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="int" varname="dst" etype="UI32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Convert the lower single-precision (32-bit) floating-point element in "a" to a 32-bit integer with truncation, and store the result in "dst".
+ [sae_note]</description>
+ <operation>
+dst[31:0] := Convert_FP32_To_Int32_Truncate(a[31:0])
+ </operation>
+ <instruction name="VCVTTSS2SI" form="r32, xmm {sae}" xed="VCVTTSS2SI_GPR32i32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cvtt_roundss_i64">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__int64" varname="dst" etype="UI64"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Convert the lower single-precision (32-bit) floating-point element in "a" to a 64-bit integer with truncation, and store the result in "dst".
+ [sae_note]</description>
+ <operation>
+dst[63:0] := Convert_FP32_To_Int64_Truncate(a[31:0])
+ </operation>
+ <instruction name="VCVTTSS2SI" form="r64, xmm {sae}" xed="VCVTTSS2SI_GPR64i64_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cvtt_roundss_si32">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="int" varname="dst" etype="UI32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Convert the lower single-precision (32-bit) floating-point element in "a" to a 32-bit integer with truncation, and store the result in "dst".
+ [sae_note]</description>
+ <operation>
+dst[31:0] := Convert_FP32_To_Int32_Truncate(a[31:0])
+ </operation>
+ <instruction name="VCVTTSS2SI" form="r32, xmm {sae}" xed="VCVTTSS2SI_GPR32i32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cvtt_roundss_si64">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__int64" varname="dst" etype="UI64"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Convert the lower single-precision (32-bit) floating-point element in "a" to a 64-bit integer with truncation, and store the result in "dst".
+ [sae_note]</description>
+ <operation>
+dst[63:0] := Convert_FP32_To_Int64_Truncate(a[31:0])
+ </operation>
+ <instruction name="VCVTTSS2SI" form="r64, xmm {sae}" xed="VCVTTSS2SI_GPR64i64_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cvttss_i32">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="int" varname="dst" etype="UI32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Convert the lower single-precision (32-bit) floating-point element in "a" to a 32-bit integer with truncation, and store the result in "dst".</description>
+ <operation>
+dst[31:0] := Convert_FP32_To_Int32_Truncate(a[31:0])
+ </operation>
+ <instruction name="VCVTTSS2SI" form="r32, xmm" xed="VCVTTSS2SI_GPR32i32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cvttss_i64">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__int64" varname="dst" etype="UI64"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Convert the lower single-precision (32-bit) floating-point element in "a" to a 64-bit integer with truncation, and store the result in "dst".</description>
+ <operation>
+dst[63:0] := Convert_FP32_To_Int64_Truncate(a[31:0])
+ </operation>
+ <instruction name="VCVTTSS2SI" form="r64, xmm" xed="VCVTTSS2SI_GPR64i64_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cvtt_roundss_u32">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="unsigned int" varname="dst" etype="UI32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Convert the lower single-precision (32-bit) floating-point element in "a" to an unsigned 32-bit integer with truncation, and store the result in "dst".
+ [sae_note]</description>
+ <operation>
+dst[31:0] := Convert_FP32_To_UInt32_Truncate(a[31:0])
+ </operation>
+ <instruction name="VCVTTSS2USI" form="r32, xmm {sae}" xed="VCVTTSS2USI_GPR32u32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cvtt_roundss_u64">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="unsigned __int64" varname="dst" etype="UI64"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Convert the lower single-precision (32-bit) floating-point element in "a" to an unsigned 64-bit integer with truncation, and store the result in "dst".
+ [sae_note]</description>
+ <operation>
+dst[63:0] := Convert_FP32_To_UInt64_Truncate(a[31:0])
+ </operation>
+ <instruction name="VCVTTSS2USI" form="r64, xmm {sae}" xed="VCVTTSS2USI_GPR64u64_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cvttss_u32">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="unsigned int" varname="dst" etype="UI32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Convert the lower single-precision (32-bit) floating-point element in "a" to an unsigned 32-bit integer with truncation, and store the result in "dst".</description>
+ <operation>
+dst[31:0] := Convert_FP32_To_UInt32_Truncate(a[31:0])
+ </operation>
+ <instruction name="VCVTTSS2USI" form="r32, xmm" xed="VCVTTSS2USI_GPR32u32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cvttss_u64">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="unsigned __int64" varname="dst" etype="UI64"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Convert the lower single-precision (32-bit) floating-point element in "a" to an unsigned 64-bit integer with truncation, and store the result in "dst".</description>
+ <operation>
+dst[63:0] := Convert_FP32_To_UInt64_Truncate(a[31:0])
+ </operation>
+ <instruction name="VCVTTSS2USI" form="r64, xmm" xed="VCVTTSS2USI_GPR64u64_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_cvtepu32_pd">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <description>Convert packed unsigned 32-bit integers in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ l := j*32
+ dst[i+63:i] := Convert_Int64_To_FP64(a[l+31:l])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCVTUDQ2PD" form="zmm, ymm" xed="VCVTUDQ2PD_ZMMf64_MASKmskw_YMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cvtepu32_pd">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <description>Convert packed unsigned 32-bit integers in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ l := j*32
+ IF k[j]
+ dst[i+63:i] := Convert_Int64_To_FP64(a[l+31:l])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCVTUDQ2PD" form="zmm {k}, ymm" xed="VCVTUDQ2PD_ZMMf64_MASKmskw_YMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_cvtepu32_pd">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <description>Convert packed unsigned 32-bit integers in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ l := j*32
+ IF k[j]
+ dst[i+63:i] := Convert_Int64_To_FP64(a[l+31:l])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCVTUDQ2PD" form="zmm {z}, ymm" xed="VCVTUDQ2PD_ZMMf64_MASKmskw_YMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_cvt_roundepu32_ps">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Convert packed unsigned 32-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst".
+ [round_note]</description>
+ <operation>
+FOR j := 0 to 15
+ i := 32*j
+ dst[i+31:i] := Convert_Int32_To_FP32(a[i+31:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCVTUDQ2PS" form="zmm, zmm {er}" xed="VCVTUDQ2PS_ZMMf32_MASKmskw_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_cvtepu32_ps">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <description>Convert packed unsigned 32-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := 32*j
+ dst[i+31:i] := Convert_Int32_To_FP32(a[i+31:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCVTUDQ2PS" form="zmm, zmm" xed="VCVTUDQ2PS_ZMMf32_MASKmskw_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cvt_roundepu32_ps">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Convert packed unsigned 32-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+ [round_note]</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := Convert_Int32_To_FP32(a[i+31:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCVTUDQ2PS" form="zmm {k}, zmm {er}" xed="VCVTUDQ2PS_ZMMf32_MASKmskw_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cvtepu32_ps">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <description>Convert packed unsigned 32-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := Convert_Int32_To_FP32(a[i+31:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCVTUDQ2PS" form="zmm {k}, zmm" xed="VCVTUDQ2PS_ZMMf32_MASKmskw_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_cvt_roundepu32_ps">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Convert packed unsigned 32-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+ [round_note]</description>
+ <operation>
+FOR j := 0 to 15
+ i := 32*j
+ IF k[j]
+ dst[i+31:i] := Convert_Int32_To_FP32(a[i+31:i])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCVTUDQ2PS" form="zmm {z}, zmm {er}" xed="VCVTUDQ2PS_ZMMf32_MASKmskw_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_cvtepu32_ps">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <description>Convert packed unsigned 32-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := 32*j
+ IF k[j]
+ dst[i+31:i] := Convert_Int32_To_FP32(a[i+31:i])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCVTUDQ2PS" form="zmm {z}, zmm" xed="VCVTUDQ2PS_ZMMf32_MASKmskw_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cvt_roundu64_sd">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="unsigned __int64" varname="b" etype="UI64"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Convert the unsigned 64-bit integer "b" to a double-precision (64-bit) floating-point element, store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst".
+ [round_note]</description>
+ <operation>
+dst[63:0] := Convert_Int64_To_FP64(b[63:0])
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VCVTUSI2SD" form="xmm, xmm, r64 {er}" xed="VCVTUSI2SD_XMMf64_XMMf64_GPR64u64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cvtu32_sd">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="unsigned int" varname="b" etype="UI32"/>
+ <description>Convert the unsigned 32-bit integer "b" to a double-precision (64-bit) floating-point element, store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst".</description>
+ <operation>
+dst[63:0] := Convert_Int32_To_FP64(b[31:0])
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VCVTUSI2SD" form="xmm, xmm, r32" xed="VCVTUSI2SD_XMMf64_XMMf64_GPR32u32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cvtu64_sd">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="unsigned __int64" varname="b" etype="UI64"/>
+ <description>Convert the unsigned 64-bit integer "b" to a double-precision (64-bit) floating-point element, store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst".</description>
+ <operation>
+dst[63:0] := Convert_Int64_To_FP64(b[63:0])
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VCVTUSI2SD" form="xmm, xmm, r64" xed="VCVTUSI2SD_XMMf64_XMMf64_GPR64u64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cvt_roundu32_ss">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="unsigned int" varname="b" etype="UI32"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Convert the unsigned 32-bit integer "b" to a single-precision (32-bit) floating-point element, store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst".
+ [round_note]</description>
+ <operation>
+dst[31:0] := Convert_Int32_To_FP32(b[31:0])
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VCVTUSI2SS" form="xmm, xmm, r32 {er}" xed="VCVTUSI2SS_XMMf32_XMMf32_GPR32u32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cvt_roundu64_ss">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="unsigned __int64" varname="b" etype="UI64"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Convert the unsigned 64-bit integer "b" to a single-precision (32-bit) floating-point element, store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst".
+ [round_note]</description>
+ <operation>
+dst[31:0] := Convert_Int64_To_FP32(b[63:0])
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VCVTUSI2SS" form="xmm, xmm, r64 {er}" xed="VCVTUSI2SS_XMMf32_XMMf32_GPR64u64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cvtu32_ss">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="unsigned int" varname="b" etype="UI32"/>
+ <description>Convert the unsigned 32-bit integer "b" to a single-precision (32-bit) floating-point element, store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst".</description>
+ <operation>
+dst[31:0] := Convert_Int32_To_FP32(b[31:0])
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VCVTUSI2SS" form="xmm, xmm, r32" xed="VCVTUSI2SS_XMMf32_XMMf32_GPR32u32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cvtu64_ss">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="unsigned __int64" varname="b" etype="UI64"/>
+ <description>Convert the unsigned 64-bit integer "b" to a single-precision (32-bit) floating-point element, store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst".</description>
+ <operation>
+dst[31:0] := Convert_Int64_To_FP32(b[63:0])
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VCVTUSI2SS" form="xmm, xmm, r64" xed="VCVTUSI2SS_XMMf32_XMMf32_GPR64u64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_div_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <description>Divide packed double-precision (64-bit) floating-point elements in "a" by packed elements in "b", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := 64*j
+ dst[i+63:i] := a[i+63:i] / b[i+63:i]
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VDIVPD" form="zmm, zmm, zmm" xed="VDIVPD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_div_round_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+	<description>Divide packed double-precision (64-bit) floating-point elements in "a" by packed elements in "b", and store the results in "dst".
+ [round_note]</description>
+ <operation>
+FOR j := 0 to 7
+ i := 64*j
+ dst[i+63:i] := a[i+63:i] / b[i+63:i]
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VDIVPD" form="zmm, zmm, zmm {er}" xed="VDIVPD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_div_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <description>Divide packed double-precision (64-bit) floating-point elements in "a" by packed elements in "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := 64*j
+ IF k[j]
+ dst[i+63:i] := a[i+63:i] / b[i+63:i]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VDIVPD" form="zmm {k}, zmm, zmm" xed="VDIVPD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_div_round_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Divide packed double-precision (64-bit) floating-point elements in "a" by packed elements in "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+ [round_note]</description>
+ <operation>
+FOR j := 0 to 7
+ i := 64*j
+ IF k[j]
+ dst[i+63:i] := a[i+63:i] / b[i+63:i]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VDIVPD" form="zmm {k}, zmm, zmm {er}" xed="VDIVPD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_div_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <description>Divide packed double-precision (64-bit) floating-point elements in "a" by packed elements in "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := 64*j
+ IF k[j]
+ dst[i+63:i] := a[i+63:i] / b[i+63:i]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VDIVPD" form="zmm {z}, zmm, zmm" xed="VDIVPD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_div_round_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Divide packed double-precision (64-bit) floating-point elements in "a" by packed elements in "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+ [round_note]</description>
+ <operation>
+FOR j := 0 to 7
+ i := 64*j
+ IF k[j]
+ dst[i+63:i] := a[i+63:i] / b[i+63:i]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VDIVPD" form="zmm {z}, zmm, zmm {er}" xed="VDIVPD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_div_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <description>Divide packed single-precision (32-bit) floating-point elements in "a" by packed elements in "b", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := 32*j
+ dst[i+31:i] := a[i+31:i] / b[i+31:i]
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VDIVPS" form="zmm, zmm, zmm" xed="VDIVPS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_div_round_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Divide packed single-precision (32-bit) floating-point elements in "a" by packed elements in "b", and store the results in "dst".
+ [round_note]</description>
+ <operation>
+FOR j := 0 to 15
+ i := 32*j
+ dst[i+31:i] := a[i+31:i] / b[i+31:i]
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VDIVPS" form="zmm, zmm, zmm {er}" xed="VDIVPS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_div_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <description>Divide packed single-precision (32-bit) floating-point elements in "a" by packed elements in "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := 32*j
+ IF k[j]
+ dst[i+31:i] := a[i+31:i] / b[i+31:i]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VDIVPS" form="zmm {k}, zmm, zmm" xed="VDIVPS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_div_round_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Divide packed single-precision (32-bit) floating-point elements in "a" by packed elements in "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+ [round_note]</description>
+ <operation>
+FOR j := 0 to 15
+ i := 32*j
+ IF k[j]
+ dst[i+31:i] := a[i+31:i] / b[i+31:i]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VDIVPS" form="zmm {k}, zmm, zmm {er}" xed="VDIVPS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_div_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <description>Divide packed single-precision (32-bit) floating-point elements in "a" by packed elements in "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := 32*j
+ IF k[j]
+ dst[i+31:i] := a[i+31:i] / b[i+31:i]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VDIVPS" form="zmm {z}, zmm, zmm" xed="VDIVPS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_div_round_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Divide packed single-precision (32-bit) floating-point elements in "a" by packed elements in "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+ [round_note]</description>
+ <operation>
+FOR j := 0 to 15
+ i := 32*j
+ IF k[j]
+ dst[i+31:i] := a[i+31:i] / b[i+31:i]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VDIVPS" form="zmm {z}, zmm, zmm {er}" xed="VDIVPS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_div_round_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Divide the lower double-precision (64-bit) floating-point element in "a" by the lower double-precision (64-bit) floating-point element in "b", store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst".
+ [round_note]</description>
+ <operation>
+dst[63:0] := a[63:0] / b[63:0]
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VDIVSD" form="xmm, xmm, xmm {er}" xed="VDIVSD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_div_round_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Divide the lower double-precision (64-bit) floating-point element in "a" by the lower double-precision (64-bit) floating-point element in "b", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst".
+ [round_note]</description>
+ <operation>
+IF k[0]
+ dst[63:0] := a[63:0] / b[63:0]
+ELSE
+ dst[63:0] := src[63:0]
+FI
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VDIVSD" form="xmm {k}, xmm, xmm {er}" xed="VDIVSD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_div_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Divide the lower double-precision (64-bit) floating-point element in "a" by the lower double-precision (64-bit) floating-point element in "b", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst".</description>
+ <operation>
+IF k[0]
+ dst[63:0] := a[63:0] / b[63:0]
+ELSE
+ dst[63:0] := src[63:0]
+FI
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VDIVSD" form="xmm {k}, xmm, xmm" xed="VDIVSD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_div_round_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Divide the lower double-precision (64-bit) floating-point element in "a" by the lower double-precision (64-bit) floating-point element in "b", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst".
+ [round_note]</description>
+ <operation>
+IF k[0]
+ dst[63:0] := a[63:0] / b[63:0]
+ELSE
+ dst[63:0] := 0
+FI
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VDIVSD" form="xmm {z}, xmm, xmm {er}" xed="VDIVSD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_div_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Divide the lower double-precision (64-bit) floating-point element in "a" by the lower double-precision (64-bit) floating-point element in "b", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst".</description>
+ <operation>
+IF k[0]
+ dst[63:0] := a[63:0] / b[63:0]
+ELSE
+ dst[63:0] := 0
+FI
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VDIVSD" form="xmm {z}, xmm, xmm" xed="VDIVSD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_div_round_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Divide the lower single-precision (32-bit) floating-point element in "a" by the lower single-precision (32-bit) floating-point element in "b", store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst".
+ [round_note]</description>
+ <operation>
+dst[31:0] := a[31:0] / b[31:0]
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VDIVSS" form="xmm, xmm, xmm {er}" xed="VDIVSS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_div_round_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Divide the lower single-precision (32-bit) floating-point element in "a" by the lower single-precision (32-bit) floating-point element in "b", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst".
+ [round_note]</description>
+ <operation>
+IF k[0]
+ dst[31:0] := a[31:0] / b[31:0]
+ELSE
+ dst[31:0] := src[31:0]
+FI
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VDIVSS" form="xmm {k}, xmm, xmm {er}" xed="VDIVSS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_div_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Divide the lower single-precision (32-bit) floating-point element in "a" by the lower single-precision (32-bit) floating-point element in "b", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst".</description>
+ <operation>
+IF k[0]
+ dst[31:0] := a[31:0] / b[31:0]
+ELSE
+ dst[31:0] := src[31:0]
+FI
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VDIVSS" form="xmm {k}, xmm, xmm" xed="VDIVSS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_div_round_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Divide the lower single-precision (32-bit) floating-point element in "a" by the lower single-precision (32-bit) floating-point element in "b", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst".
+ [round_note]</description>
+ <operation>
+IF k[0]
+ dst[31:0] := a[31:0] / b[31:0]
+ELSE
+ dst[31:0] := 0
+FI
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VDIVSS" form="xmm {z}, xmm, xmm {er}" xed="VDIVSS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_div_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Divide the lower single-precision (32-bit) floating-point element in "a" by the lower single-precision (32-bit) floating-point element in "b", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst".</description>
+ <operation>
+IF k[0]
+ dst[31:0] := a[31:0] / b[31:0]
+ELSE
+ dst[31:0] := 0
+FI
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VDIVSS" form="xmm {z}, xmm, xmm" xed="VDIVSS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_expand_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Load contiguous active double-precision (64-bit) floating-point elements from "a" (those with their respective bit set in mask "k"), and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+m := 0
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := a[m+63:m]
+ m := m + 64
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VEXPANDPD" form="zmm {k}, zmm" xed="VEXPANDPD_ZMMf64_MASKmskw_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_expandloadu_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Load</category>
+ <category>Swizzle</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="void const*" varname="mem_addr" etype="FP64" memwidth="512"/>
+ <description>Load contiguous active double-precision (64-bit) floating-point elements from unaligned memory at "mem_addr" (those with their respective bit set in mask "k"), and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+m := 0
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := MEM[mem_addr+m+63:mem_addr+m]
+ m := m + 64
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VEXPANDPD" form="zmm {k}, m512" xed="VEXPANDPD_ZMMf64_MASKmskw_MEMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_expand_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Load contiguous active double-precision (64-bit) floating-point elements from "a" (those with their respective bit set in mask "k"), and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+m := 0
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := a[m+63:m]
+ m := m + 64
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VEXPANDPD" form="zmm {z}, zmm" xed="VEXPANDPD_ZMMf64_MASKmskw_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_expandloadu_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Load</category>
+ <category>Swizzle</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="void const*" varname="mem_addr" etype="FP64" memwidth="512"/>
+ <description>Load contiguous active double-precision (64-bit) floating-point elements from unaligned memory at "mem_addr" (those with their respective bit set in mask "k"), and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+m := 0
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := MEM[mem_addr+m+63:mem_addr+m]
+ m := m + 64
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VEXPANDPD" form="zmm {z}, m512" xed="VEXPANDPD_ZMMf64_MASKmskw_MEMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_expand_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Load contiguous active single-precision (32-bit) floating-point elements from "a" (those with their respective bit set in mask "k"), and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+m := 0
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := a[m+31:m]
+ m := m + 32
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VEXPANDPS" form="zmm {k}, zmm" xed="VEXPANDPS_ZMMf32_MASKmskw_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_expandloadu_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Load</category>
+ <category>Swizzle</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="void const*" varname="mem_addr" etype="FP32" memwidth="512"/>
+ <description>Load contiguous active single-precision (32-bit) floating-point elements from unaligned memory at "mem_addr" (those with their respective bit set in mask "k"), and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+m := 0
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := MEM[mem_addr+m+31:mem_addr+m]
+ m := m + 32
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VEXPANDPS" form="zmm {k}, m512" xed="VEXPANDPS_ZMMf32_MASKmskw_MEMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_expand_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Load contiguous active single-precision (32-bit) floating-point elements from "a" (those with their respective bit set in mask "k"), and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+m := 0
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := a[m+31:m]
+ m := m + 32
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VEXPANDPS" form="zmm {z}, zmm" xed="VEXPANDPS_ZMMf32_MASKmskw_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_expandloadu_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Load</category>
+ <category>Swizzle</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="void const*" varname="mem_addr" etype="FP32" memwidth="512"/>
+ <description>Load contiguous active single-precision (32-bit) floating-point elements from unaligned memory at "mem_addr" (those with their respective bit set in mask "k"), and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+m := 0
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := MEM[mem_addr+m+31:mem_addr+m]
+ m := m + 32
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VEXPANDPS" form="zmm {z}, m512" xed="VEXPANDPS_ZMMf32_MASKmskw_MEMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_extractf32x4_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="2"/>
+ <description>Extract 128 bits (composed of 4 packed single-precision (32-bit) floating-point elements) from "a", selected with "imm8", and store the result in "dst".</description>
+ <operation>
+CASE imm8[1:0] OF
+0: dst[127:0] := a[127:0]
+1: dst[127:0] := a[255:128]
+2: dst[127:0] := a[383:256]
+3: dst[127:0] := a[511:384]
+ESAC
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VEXTRACTF32X4" form="xmm, zmm, imm8" xed="VEXTRACTF32X4_XMMf32_MASKmskw_ZMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_extractf32x4_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="2"/>
+ <description>Extract 128 bits (composed of 4 packed single-precision (32-bit) floating-point elements) from "a", selected with "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+CASE imm8[1:0] OF
+0: tmp[127:0] := a[127:0]
+1: tmp[127:0] := a[255:128]
+2: tmp[127:0] := a[383:256]
+3: tmp[127:0] := a[511:384]
+ESAC
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := tmp[i+31:i]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VEXTRACTF32X4" form="xmm {k}, zmm, imm8" xed="VEXTRACTF32X4_XMMf32_MASKmskw_ZMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_extractf32x4_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="2"/>
+ <description>Extract 128 bits (composed of 4 packed single-precision (32-bit) floating-point elements) from "a", selected with "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+CASE imm8[1:0] OF
+0: tmp[127:0] := a[127:0]
+1: tmp[127:0] := a[255:128]
+2: tmp[127:0] := a[383:256]
+3: tmp[127:0] := a[511:384]
+ESAC
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := tmp[i+31:i]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VEXTRACTF32X4" form="xmm {z}, zmm, imm8" xed="VEXTRACTF32X4_XMMf32_MASKmskw_ZMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_extractf64x4_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="1"/>
+ <description>Extract 256 bits (composed of 4 packed double-precision (64-bit) floating-point elements) from "a", selected with "imm8", and store the result in "dst".</description>
+ <operation>
+CASE imm8[0] OF
+0: dst[255:0] := a[255:0]
+1: dst[255:0] := a[511:256]
+ESAC
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VEXTRACTF64X4" form="ymm, zmm, imm8" xed="VEXTRACTF64X4_YMMf64_MASKmskw_ZMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_extractf64x4_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="1"/>
+ <description>Extract 256 bits (composed of 4 packed double-precision (64-bit) floating-point elements) from "a", selected with "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+CASE imm8[0] OF
+0: tmp[255:0] := a[255:0]
+1: tmp[255:0] := a[511:256]
+ESAC
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := tmp[i+63:i]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VEXTRACTF64X4" form="ymm {k}, zmm, imm8" xed="VEXTRACTF64X4_YMMf64_MASKmskw_ZMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_extractf64x4_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="1"/>
+ <description>Extract 256 bits (composed of 4 packed double-precision (64-bit) floating-point elements) from "a", selected with "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+CASE imm8[0] OF
+0: tmp[255:0] := a[255:0]
+1: tmp[255:0] := a[511:256]
+ESAC
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := tmp[i+63:i]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VEXTRACTF64X4" form="ymm {z}, zmm, imm8" xed="VEXTRACTF64X4_YMMf64_MASKmskw_ZMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_extracti32x4_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="2"/>
+ <description>Extract 128 bits (composed of 4 packed 32-bit integers) from "a", selected with "imm8", and store the result in "dst".</description>
+ <operation>
+CASE imm8[1:0] OF
+0: dst[127:0] := a[127:0]
+1: dst[127:0] := a[255:128]
+2: dst[127:0] := a[383:256]
+3: dst[127:0] := a[511:384]
+ESAC
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VEXTRACTI32X4" form="xmm, zmm, imm8" xed="VEXTRACTI32X4_XMMu32_MASKmskw_ZMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_extracti32x4_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="2"/>
+ <description>Extract 128 bits (composed of 4 packed 32-bit integers) from "a", selected with "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+CASE imm8[1:0] OF
+0: tmp[127:0] := a[127:0]
+1: tmp[127:0] := a[255:128]
+2: tmp[127:0] := a[383:256]
+3: tmp[127:0] := a[511:384]
+ESAC
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := tmp[i+31:i]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VEXTRACTI32X4" form="xmm {k}, zmm, imm8" xed="VEXTRACTI32X4_XMMu32_MASKmskw_ZMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_extracti32x4_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="2"/>
+ <description>Extract 128 bits (composed of 4 packed 32-bit integers) from "a", selected with "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+CASE imm8[1:0] OF
+0: tmp[127:0] := a[127:0]
+1: tmp[127:0] := a[255:128]
+2: tmp[127:0] := a[383:256]
+3: tmp[127:0] := a[511:384]
+ESAC
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := tmp[i+31:i]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VEXTRACTI32X4" form="xmm {z}, zmm, imm8" xed="VEXTRACTI32X4_XMMu32_MASKmskw_ZMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_extracti64x4_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="1"/>
+ <description>Extract 256 bits (composed of 4 packed 64-bit integers) from "a", selected with "imm8", and store the result in "dst".</description>
+ <operation>
+CASE imm8[0] OF
+0: dst[255:0] := a[255:0]
+1: dst[255:0] := a[511:256]
+ESAC
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VEXTRACTI64X4" form="ymm, zmm, imm8" xed="VEXTRACTI64X4_YMMu64_MASKmskw_ZMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_extracti64x4_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m256i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="1"/>
+ <description>Extract 256 bits (composed of 4 packed 64-bit integers) from "a", selected with "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+CASE imm8[0] OF
+0: tmp[255:0] := a[255:0]
+1: tmp[255:0] := a[511:256]
+ESAC
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := tmp[i+63:i]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VEXTRACTI64X4" form="ymm {k}, zmm, imm8" xed="VEXTRACTI64X4_YMMu64_MASKmskw_ZMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_extracti64x4_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="1"/>
+ <description>Extract 256 bits (composed of 4 packed 64-bit integers) from "a", selected with "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+CASE imm8[0] OF
+0: tmp[255:0] := a[255:0]
+1: tmp[255:0] := a[511:256]
+ESAC
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := tmp[i+63:i]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VEXTRACTI64X4" form="ymm {z}, zmm, imm8" xed="VEXTRACTI64X4_YMMu64_MASKmskw_ZMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_fixupimm_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <parameter type="__m512i" varname="c" etype="UI64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Fix up packed double-precision (64-bit) floating-point elements in "a" and "b" using packed 64-bit integers in "c", and store the results in "dst". "imm8" is used to set the required flags reporting.</description>
+ <operation>enum TOKEN_TYPE {
+ QNAN_TOKEN := 0, \
+ SNAN_TOKEN := 1, \
+ ZERO_VALUE_TOKEN := 2, \
+ ONE_VALUE_TOKEN := 3, \
+ NEG_INF_TOKEN := 4, \
+ POS_INF_TOKEN := 5, \
+ NEG_VALUE_TOKEN := 6, \
+ POS_VALUE_TOKEN := 7
+}
+DEFINE FIXUPIMMPD(src1[63:0], src2[63:0], src3[63:0], imm8[7:0]) {
+ tsrc[63:0] := ((src2[62:52] == 0) AND (MXCSR.DAZ == 1)) ? 0.0 : src2[63:0]
+ CASE(tsrc[63:0]) OF
+ QNAN_TOKEN:j := 0
+ SNAN_TOKEN:j := 1
+ ZERO_VALUE_TOKEN: j := 2
+ ONE_VALUE_TOKEN: j := 3
+ NEG_INF_TOKEN: j := 4
+ POS_INF_TOKEN: j := 5
+ NEG_VALUE_TOKEN: j := 6
+ POS_VALUE_TOKEN: j := 7
+ ESAC
+
+ token_response[3:0] := src3[3+4*j:4*j]
+
+ CASE(token_response[3:0]) OF
+ 0 : dest[63:0] := src1[63:0]
+ 1 : dest[63:0] := tsrc[63:0]
+ 2 : dest[63:0] := QNaN(tsrc[63:0])
+ 3 : dest[63:0] := QNAN_Indefinite
+ 4 : dest[63:0] := -INF
+ 5 : dest[63:0] := +INF
+ 6 : dest[63:0] := tsrc.sign? -INF : +INF
+ 7 : dest[63:0] := -0
+ 8 : dest[63:0] := +0
+ 9 : dest[63:0] := -1
+ 10: dest[63:0] := +1
+ 11: dest[63:0] := 1/2
+ 12: dest[63:0] := 90.0
+ 13: dest[63:0] := PI/2
+ 14: dest[63:0] := MAX_FLOAT
+ 15: dest[63:0] := -MAX_FLOAT
+ ESAC
+
+ CASE(tsrc[31:0]) OF
+ ZERO_VALUE_TOKEN:
+ IF (imm8[0]) #ZE; FI
+ ZERO_VALUE_TOKEN:
+ IF (imm8[1]) #IE; FI
+ ONE_VALUE_TOKEN:
+ IF (imm8[2]) #ZE; FI
+ ONE_VALUE_TOKEN:
+ IF (imm8[3]) #IE; FI
+ SNAN_TOKEN:
+ IF (imm8[4]) #IE; FI
+ NEG_INF_TOKEN:
+ IF (imm8[5]) #IE; FI
+ NEG_VALUE_TOKEN:
+ IF (imm8[6]) #IE; FI
+ POS_INF_TOKEN:
+ IF (imm8[7]) #IE; FI
+ ESAC
+ RETURN dest[63:0]
+}
+FOR j := 0 to 7
+ i := j*64
+ dst[i+63:i] := FIXUPIMMPD(a[i+63:i], b[i+63:i], c[i+63:i], imm8[7:0])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VFIXUPIMMPD" form="zmm, zmm, zmm, imm8" xed="VFIXUPIMMPD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_fixupimm_round_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <parameter type="__m512i" varname="c" etype="UI64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Fix up packed double-precision (64-bit) floating-point elements in "a" and "b" using packed 64-bit integers in "c", and store the results in "dst". "imm8" is used to set the required flags reporting.
+ [sae_note]</description>
+ <operation>enum TOKEN_TYPE {
+ QNAN_TOKEN := 0, \
+ SNAN_TOKEN := 1, \
+ ZERO_VALUE_TOKEN := 2, \
+ ONE_VALUE_TOKEN := 3, \
+ NEG_INF_TOKEN := 4, \
+ POS_INF_TOKEN := 5, \
+ NEG_VALUE_TOKEN := 6, \
+ POS_VALUE_TOKEN := 7
+}
+DEFINE FIXUPIMMPD(src1[63:0], src2[63:0], src3[63:0], imm8[7:0]) {
+ tsrc[63:0] := ((src2[62:52] == 0) AND (MXCSR.DAZ == 1)) ? 0.0 : src2[63:0]
+ CASE(tsrc[63:0]) OF
+ QNAN_TOKEN:j := 0
+ SNAN_TOKEN:j := 1
+ ZERO_VALUE_TOKEN: j := 2
+ ONE_VALUE_TOKEN: j := 3
+ NEG_INF_TOKEN: j := 4
+ POS_INF_TOKEN: j := 5
+ NEG_VALUE_TOKEN: j := 6
+ POS_VALUE_TOKEN: j := 7
+ ESAC
+
+ token_response[3:0] := src3[3+4*j:4*j]
+
+ CASE(token_response[3:0]) OF
+ 0 : dest[63:0] := src1[63:0]
+ 1 : dest[63:0] := tsrc[63:0]
+ 2 : dest[63:0] := QNaN(tsrc[63:0])
+ 3 : dest[63:0] := QNAN_Indefinite
+ 4 : dest[63:0] := -INF
+ 5 : dest[63:0] := +INF
+ 6 : dest[63:0] := tsrc.sign? -INF : +INF
+ 7 : dest[63:0] := -0
+ 8 : dest[63:0] := +0
+ 9 : dest[63:0] := -1
+ 10: dest[63:0] := +1
+ 11: dest[63:0] := 1/2
+ 12: dest[63:0] := 90.0
+ 13: dest[63:0] := PI/2
+ 14: dest[63:0] := MAX_FLOAT
+ 15: dest[63:0] := -MAX_FLOAT
+ ESAC
+
+ CASE(tsrc[31:0]) OF
+ ZERO_VALUE_TOKEN:
+ IF (imm8[0]) #ZE; FI
+ ZERO_VALUE_TOKEN:
+ IF (imm8[1]) #IE; FI
+ ONE_VALUE_TOKEN:
+ IF (imm8[2]) #ZE; FI
+ ONE_VALUE_TOKEN:
+ IF (imm8[3]) #IE; FI
+ SNAN_TOKEN:
+ IF (imm8[4]) #IE; FI
+ NEG_INF_TOKEN:
+ IF (imm8[5]) #IE; FI
+ NEG_VALUE_TOKEN:
+ IF (imm8[6]) #IE; FI
+ POS_INF_TOKEN:
+ IF (imm8[7]) #IE; FI
+ ESAC
+ RETURN dest[63:0]
+}
+FOR j := 0 to 7
+ i := j*64
+ dst[i+63:i] := FIXUPIMMPD(a[i+63:i], b[i+63:i], c[i+63:i], imm8[7:0])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VFIXUPIMMPD" form="zmm, zmm, zmm, imm8 {sae}" xed="VFIXUPIMMPD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_fixupimm_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <parameter type="__m512i" varname="c" etype="UI64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Fix up packed double-precision (64-bit) floating-point elements in "a" and "b" using packed 64-bit integers in "c", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). "imm8" is used to set the required flags reporting.</description>
+ <operation>enum TOKEN_TYPE {
+ QNAN_TOKEN := 0, \
+ SNAN_TOKEN := 1, \
+ ZERO_VALUE_TOKEN := 2, \
+ ONE_VALUE_TOKEN := 3, \
+ NEG_INF_TOKEN := 4, \
+ POS_INF_TOKEN := 5, \
+ NEG_VALUE_TOKEN := 6, \
+ POS_VALUE_TOKEN := 7
+}
+DEFINE FIXUPIMMPD(src1[63:0], src2[63:0], src3[63:0], imm8[7:0]) {
+ tsrc[63:0] := ((src2[62:52] == 0) AND (MXCSR.DAZ == 1)) ? 0.0 : src2[63:0]
+ CASE(tsrc[63:0]) OF
+ QNAN_TOKEN:j := 0
+ SNAN_TOKEN:j := 1
+ ZERO_VALUE_TOKEN: j := 2
+ ONE_VALUE_TOKEN: j := 3
+ NEG_INF_TOKEN: j := 4
+ POS_INF_TOKEN: j := 5
+ NEG_VALUE_TOKEN: j := 6
+ POS_VALUE_TOKEN: j := 7
+ ESAC
+
+ token_response[3:0] := src3[3+4*j:4*j]
+
+ CASE(token_response[3:0]) OF
+ 0 : dest[63:0] := src1[63:0]
+ 1 : dest[63:0] := tsrc[63:0]
+ 2 : dest[63:0] := QNaN(tsrc[63:0])
+ 3 : dest[63:0] := QNAN_Indefinite
+ 4 : dest[63:0] := -INF
+ 5 : dest[63:0] := +INF
+ 6 : dest[63:0] := tsrc.sign? -INF : +INF
+ 7 : dest[63:0] := -0
+ 8 : dest[63:0] := +0
+ 9 : dest[63:0] := -1
+ 10: dest[63:0] := +1
+ 11: dest[63:0] := 1/2
+ 12: dest[63:0] := 90.0
+ 13: dest[63:0] := PI/2
+ 14: dest[63:0] := MAX_FLOAT
+ 15: dest[63:0] := -MAX_FLOAT
+ ESAC
+
+ CASE(tsrc[31:0]) OF
+ ZERO_VALUE_TOKEN:
+ IF (imm8[0]) #ZE; FI
+ ZERO_VALUE_TOKEN:
+ IF (imm8[1]) #IE; FI
+ ONE_VALUE_TOKEN:
+ IF (imm8[2]) #ZE; FI
+ ONE_VALUE_TOKEN:
+ IF (imm8[3]) #IE; FI
+ SNAN_TOKEN:
+ IF (imm8[4]) #IE; FI
+ NEG_INF_TOKEN:
+ IF (imm8[5]) #IE; FI
+ NEG_VALUE_TOKEN:
+ IF (imm8[6]) #IE; FI
+ POS_INF_TOKEN:
+ IF (imm8[7]) #IE; FI
+ ESAC
+ RETURN dest[63:0]
+}
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := FIXUPIMMPD(a[i+63:i], b[i+63:i], c[i+63:i], imm8[7:0])
+ ELSE
+ dst[i+63:i] := a[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VFIXUPIMMPD" form="zmm {k}, zmm, zmm, imm8" xed="VFIXUPIMMPD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_fixupimm_round_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <parameter type="__m512i" varname="c" etype="UI64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Fix up packed double-precision (64-bit) floating-point elements in "a" and "b" using packed 64-bit integers in "c", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). "imm8" is used to set the required flags reporting.
+ [sae_note]</description>
+ <operation>enum TOKEN_TYPE {
+ QNAN_TOKEN := 0, \
+ SNAN_TOKEN := 1, \
+ ZERO_VALUE_TOKEN := 2, \
+ ONE_VALUE_TOKEN := 3, \
+ NEG_INF_TOKEN := 4, \
+ POS_INF_TOKEN := 5, \
+ NEG_VALUE_TOKEN := 6, \
+ POS_VALUE_TOKEN := 7
+}
+DEFINE FIXUPIMMPD(src1[63:0], src2[63:0], src3[63:0], imm8[7:0]) {
+ tsrc[63:0] := ((src2[62:52] == 0) AND (MXCSR.DAZ == 1)) ? 0.0 : src2[63:0]
+ CASE(tsrc[63:0]) OF
+ QNAN_TOKEN:j := 0
+ SNAN_TOKEN:j := 1
+ ZERO_VALUE_TOKEN: j := 2
+ ONE_VALUE_TOKEN: j := 3
+ NEG_INF_TOKEN: j := 4
+ POS_INF_TOKEN: j := 5
+ NEG_VALUE_TOKEN: j := 6
+ POS_VALUE_TOKEN: j := 7
+ ESAC
+
+ token_response[3:0] := src3[3+4*j:4*j]
+
+ CASE(token_response[3:0]) OF
+ 0 : dest[63:0] := src1[63:0]
+ 1 : dest[63:0] := tsrc[63:0]
+ 2 : dest[63:0] := QNaN(tsrc[63:0])
+ 3 : dest[63:0] := QNAN_Indefinite
+ 4 : dest[63:0] := -INF
+ 5 : dest[63:0] := +INF
+ 6 : dest[63:0] := tsrc.sign? -INF : +INF
+ 7 : dest[63:0] := -0
+ 8 : dest[63:0] := +0
+ 9 : dest[63:0] := -1
+ 10: dest[63:0] := +1
+ 11: dest[63:0] := 1/2
+ 12: dest[63:0] := 90.0
+ 13: dest[63:0] := PI/2
+ 14: dest[63:0] := MAX_FLOAT
+ 15: dest[63:0] := -MAX_FLOAT
+ ESAC
+
+ CASE(tsrc[31:0]) OF
+ ZERO_VALUE_TOKEN:
+ IF (imm8[0]) #ZE; FI
+ ZERO_VALUE_TOKEN:
+ IF (imm8[1]) #IE; FI
+ ONE_VALUE_TOKEN:
+ IF (imm8[2]) #ZE; FI
+ ONE_VALUE_TOKEN:
+ IF (imm8[3]) #IE; FI
+ SNAN_TOKEN:
+ IF (imm8[4]) #IE; FI
+ NEG_INF_TOKEN:
+ IF (imm8[5]) #IE; FI
+ NEG_VALUE_TOKEN:
+ IF (imm8[6]) #IE; FI
+ POS_INF_TOKEN:
+ IF (imm8[7]) #IE; FI
+ ESAC
+ RETURN dest[63:0]
+}
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := FIXUPIMMPD(a[i+63:i], b[i+63:i], c[i+63:i], imm8[7:0])
+ ELSE
+ dst[i+63:i] := a[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VFIXUPIMMPD" form="zmm {k}, zmm, zmm, imm8 {sae}" xed="VFIXUPIMMPD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_fixupimm_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <parameter type="__m512i" varname="c" etype="UI64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Fix up packed double-precision (64-bit) floating-point elements in "a" and "b" using packed 64-bit integers in "c", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). "imm8" is used to set the required flags reporting.</description>
+ <operation>enum TOKEN_TYPE {
+ QNAN_TOKEN := 0, \
+ SNAN_TOKEN := 1, \
+ ZERO_VALUE_TOKEN := 2, \
+ ONE_VALUE_TOKEN := 3, \
+ NEG_INF_TOKEN := 4, \
+ POS_INF_TOKEN := 5, \
+ NEG_VALUE_TOKEN := 6, \
+ POS_VALUE_TOKEN := 7
+}
+DEFINE FIXUPIMMPD(src1[63:0], src2[63:0], src3[63:0], imm8[7:0]) {
+ tsrc[63:0] := ((src2[62:52] == 0) AND (MXCSR.DAZ == 1)) ? 0.0 : src2[63:0]
+ CASE(tsrc[63:0]) OF
+ QNAN_TOKEN:j := 0
+ SNAN_TOKEN:j := 1
+ ZERO_VALUE_TOKEN: j := 2
+ ONE_VALUE_TOKEN: j := 3
+ NEG_INF_TOKEN: j := 4
+ POS_INF_TOKEN: j := 5
+ NEG_VALUE_TOKEN: j := 6
+ POS_VALUE_TOKEN: j := 7
+ ESAC
+
+ token_response[3:0] := src3[3+4*j:4*j]
+
+ CASE(token_response[3:0]) OF
+ 0 : dest[63:0] := src1[63:0]
+ 1 : dest[63:0] := tsrc[63:0]
+ 2 : dest[63:0] := QNaN(tsrc[63:0])
+ 3 : dest[63:0] := QNAN_Indefinite
+ 4 : dest[63:0] := -INF
+ 5 : dest[63:0] := +INF
+ 6 : dest[63:0] := tsrc.sign? -INF : +INF
+ 7 : dest[63:0] := -0
+ 8 : dest[63:0] := +0
+ 9 : dest[63:0] := -1
+ 10: dest[63:0] := +1
+ 11: dest[63:0] := 1/2
+ 12: dest[63:0] := 90.0
+ 13: dest[63:0] := PI/2
+ 14: dest[63:0] := MAX_FLOAT
+ 15: dest[63:0] := -MAX_FLOAT
+ ESAC
+
+ CASE(tsrc[31:0]) OF
+ ZERO_VALUE_TOKEN:
+ IF (imm8[0]) #ZE; FI
+ ZERO_VALUE_TOKEN:
+ IF (imm8[1]) #IE; FI
+ ONE_VALUE_TOKEN:
+ IF (imm8[2]) #ZE; FI
+ ONE_VALUE_TOKEN:
+ IF (imm8[3]) #IE; FI
+ SNAN_TOKEN:
+ IF (imm8[4]) #IE; FI
+ NEG_INF_TOKEN:
+ IF (imm8[5]) #IE; FI
+ NEG_VALUE_TOKEN:
+ IF (imm8[6]) #IE; FI
+ POS_INF_TOKEN:
+ IF (imm8[7]) #IE; FI
+ ESAC
+ RETURN dest[63:0]
+}
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := FIXUPIMMPD(a[i+63:i], b[i+63:i], c[i+63:i], imm8[7:0])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VFIXUPIMMPD" form="zmm {z}, zmm, zmm, imm8" xed="VFIXUPIMMPD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_fixupimm_round_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <parameter type="__m512i" varname="c" etype="UI64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Fix up packed double-precision (64-bit) floating-point elements in "a" and "b" using packed 64-bit integers in "c", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). "imm8" is used to set the required flags reporting.
+ [sae_note]</description>
+ <operation>enum TOKEN_TYPE {
+ QNAN_TOKEN := 0, \
+ SNAN_TOKEN := 1, \
+ ZERO_VALUE_TOKEN := 2, \
+ ONE_VALUE_TOKEN := 3, \
+ NEG_INF_TOKEN := 4, \
+ POS_INF_TOKEN := 5, \
+ NEG_VALUE_TOKEN := 6, \
+ POS_VALUE_TOKEN := 7
+}
+DEFINE FIXUPIMMPD(src1[63:0], src2[63:0], src3[63:0], imm8[7:0]) {
+ tsrc[63:0] := ((src2[62:52] == 0) AND (MXCSR.DAZ == 1)) ? 0.0 : src2[63:0]
+ CASE(tsrc[63:0]) OF
+ QNAN_TOKEN:j := 0
+ SNAN_TOKEN:j := 1
+ ZERO_VALUE_TOKEN: j := 2
+ ONE_VALUE_TOKEN: j := 3
+ NEG_INF_TOKEN: j := 4
+ POS_INF_TOKEN: j := 5
+ NEG_VALUE_TOKEN: j := 6
+ POS_VALUE_TOKEN: j := 7
+ ESAC
+
+ token_response[3:0] := src3[3+4*j:4*j]
+
+ CASE(token_response[3:0]) OF
+ 0 : dest[63:0] := src1[63:0]
+ 1 : dest[63:0] := tsrc[63:0]
+ 2 : dest[63:0] := QNaN(tsrc[63:0])
+ 3 : dest[63:0] := QNAN_Indefinite
+ 4 : dest[63:0] := -INF
+ 5 : dest[63:0] := +INF
+ 6 : dest[63:0] := tsrc.sign? -INF : +INF
+ 7 : dest[63:0] := -0
+ 8 : dest[63:0] := +0
+ 9 : dest[63:0] := -1
+ 10: dest[63:0] := +1
+ 11: dest[63:0] := 1/2
+ 12: dest[63:0] := 90.0
+ 13: dest[63:0] := PI/2
+ 14: dest[63:0] := MAX_FLOAT
+ 15: dest[63:0] := -MAX_FLOAT
+ ESAC
+
+ CASE(tsrc[31:0]) OF
+ ZERO_VALUE_TOKEN:
+ IF (imm8[0]) #ZE; FI
+ ZERO_VALUE_TOKEN:
+ IF (imm8[1]) #IE; FI
+ ONE_VALUE_TOKEN:
+ IF (imm8[2]) #ZE; FI
+ ONE_VALUE_TOKEN:
+ IF (imm8[3]) #IE; FI
+ SNAN_TOKEN:
+ IF (imm8[4]) #IE; FI
+ NEG_INF_TOKEN:
+ IF (imm8[5]) #IE; FI
+ NEG_VALUE_TOKEN:
+ IF (imm8[6]) #IE; FI
+ POS_INF_TOKEN:
+ IF (imm8[7]) #IE; FI
+ ESAC
+ RETURN dest[63:0]
+}
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := FIXUPIMMPD(a[i+63:i], b[i+63:i], c[i+63:i], imm8[7:0])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VFIXUPIMMPD" form="zmm {z}, zmm, zmm, imm8 {sae}" xed="VFIXUPIMMPD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_fixupimm_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <parameter type="__m512i" varname="c" etype="UI32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Fix up packed single-precision (32-bit) floating-point elements in "a" and "b" using packed 32-bit integers in "c", and store the results in "dst". "imm8" is used to set the required flags reporting.</description>
+ <operation>enum TOKEN_TYPE {
+ QNAN_TOKEN := 0, \
+ SNAN_TOKEN := 1, \
+ ZERO_VALUE_TOKEN := 2, \
+ ONE_VALUE_TOKEN := 3, \
+ NEG_INF_TOKEN := 4, \
+ POS_INF_TOKEN := 5, \
+ NEG_VALUE_TOKEN := 6, \
+ POS_VALUE_TOKEN := 7
+}
+DEFINE FIXUPIMMPD(src1[31:0], src2[31:0], src3[31:0], imm8[7:0]) {
+ tsrc[31:0] := ((src2[30:23] == 0) AND (MXCSR.DAZ == 1)) ? 0.0 : src2[31:0]
+ CASE(tsrc[31:0]) OF
+ QNAN_TOKEN:j := 0
+ SNAN_TOKEN:j := 1
+ ZERO_VALUE_TOKEN: j := 2
+ ONE_VALUE_TOKEN: j := 3
+ NEG_INF_TOKEN: j := 4
+ POS_INF_TOKEN: j := 5
+ NEG_VALUE_TOKEN: j := 6
+ POS_VALUE_TOKEN: j := 7
+ ESAC
+
+ token_response[3:0] := src3[3+4*j:4*j]
+
+ CASE(token_response[3:0]) OF
+ 0 : dest[31:0] := src1[31:0]
+ 1 : dest[31:0] := tsrc[31:0]
+ 2 : dest[31:0] := QNaN(tsrc[31:0])
+ 3 : dest[31:0] := QNAN_Indefinite
+ 4 : dest[31:0] := -INF
+ 5 : dest[31:0] := +INF
+ 6 : dest[31:0] := tsrc.sign? -INF : +INF
+ 7 : dest[31:0] := -0
+ 8 : dest[31:0] := +0
+ 9 : dest[31:0] := -1
+ 10: dest[31:0] := +1
+ 11: dest[31:0] := 1/2
+ 12: dest[31:0] := 90.0
+ 13: dest[31:0] := PI/2
+ 14: dest[31:0] := MAX_FLOAT
+ 15: dest[31:0] := -MAX_FLOAT
+ ESAC
+
+ CASE(tsrc[31:0]) OF
+ ZERO_VALUE_TOKEN:
+ IF (imm8[0]) #ZE; FI
+ ZERO_VALUE_TOKEN:
+ IF (imm8[1]) #IE; FI
+ ONE_VALUE_TOKEN:
+ IF (imm8[2]) #ZE; FI
+ ONE_VALUE_TOKEN:
+ IF (imm8[3]) #IE; FI
+ SNAN_TOKEN:
+ IF (imm8[4]) #IE; FI
+ NEG_INF_TOKEN:
+ IF (imm8[5]) #IE; FI
+ NEG_VALUE_TOKEN:
+ IF (imm8[6]) #IE; FI
+ POS_INF_TOKEN:
+ IF (imm8[7]) #IE; FI
+ ESAC
+ RETURN dest[31:0]
+}
+FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := FIXUPIMMPD(a[i+31:i], b[i+31:i], c[i+31:i], imm8[7:0])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VFIXUPIMMPS" form="zmm, zmm, zmm, imm8" xed="VFIXUPIMMPS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_fixupimm_round_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <parameter type="__m512i" varname="c" etype="UI32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Fix up packed single-precision (32-bit) floating-point elements in "a" and "b" using packed 32-bit integers in "c", and store the results in "dst". "imm8" is used to set the required flags reporting.
+ [sae_note]</description>
+ <operation>enum TOKEN_TYPE {
+ QNAN_TOKEN := 0, \
+ SNAN_TOKEN := 1, \
+ ZERO_VALUE_TOKEN := 2, \
+ ONE_VALUE_TOKEN := 3, \
+ NEG_INF_TOKEN := 4, \
+ POS_INF_TOKEN := 5, \
+ NEG_VALUE_TOKEN := 6, \
+ POS_VALUE_TOKEN := 7
+}
+DEFINE FIXUPIMMPD(src1[31:0], src2[31:0], src3[31:0], imm8[7:0]) {
+ tsrc[31:0] := ((src2[30:23] == 0) AND (MXCSR.DAZ == 1)) ? 0.0 : src2[31:0]
+ CASE(tsrc[31:0]) OF
+ QNAN_TOKEN:j := 0
+ SNAN_TOKEN:j := 1
+ ZERO_VALUE_TOKEN: j := 2
+ ONE_VALUE_TOKEN: j := 3
+ NEG_INF_TOKEN: j := 4
+ POS_INF_TOKEN: j := 5
+ NEG_VALUE_TOKEN: j := 6
+ POS_VALUE_TOKEN: j := 7
+ ESAC
+
+ token_response[3:0] := src3[3+4*j:4*j]
+
+ CASE(token_response[3:0]) OF
+ 0 : dest[31:0] := src1[31:0]
+ 1 : dest[31:0] := tsrc[31:0]
+ 2 : dest[31:0] := QNaN(tsrc[31:0])
+ 3 : dest[31:0] := QNAN_Indefinite
+ 4 : dest[31:0] := -INF
+ 5 : dest[31:0] := +INF
+ 6 : dest[31:0] := tsrc.sign? -INF : +INF
+ 7 : dest[31:0] := -0
+ 8 : dest[31:0] := +0
+ 9 : dest[31:0] := -1
+ 10: dest[31:0] := +1
+ 11: dest[31:0] := 1/2
+ 12: dest[31:0] := 90.0
+ 13: dest[31:0] := PI/2
+ 14: dest[31:0] := MAX_FLOAT
+ 15: dest[31:0] := -MAX_FLOAT
+ ESAC
+
+ CASE(tsrc[31:0]) OF
+ ZERO_VALUE_TOKEN:
+ IF (imm8[0]) #ZE; FI
+ ZERO_VALUE_TOKEN:
+ IF (imm8[1]) #IE; FI
+ ONE_VALUE_TOKEN:
+ IF (imm8[2]) #ZE; FI
+ ONE_VALUE_TOKEN:
+ IF (imm8[3]) #IE; FI
+ SNAN_TOKEN:
+ IF (imm8[4]) #IE; FI
+ NEG_INF_TOKEN:
+ IF (imm8[5]) #IE; FI
+ NEG_VALUE_TOKEN:
+ IF (imm8[6]) #IE; FI
+ POS_INF_TOKEN:
+ IF (imm8[7]) #IE; FI
+ ESAC
+ RETURN dest[31:0]
+}
+FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := FIXUPIMMPD(a[i+31:i], b[i+31:i], c[i+31:i], imm8[7:0])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VFIXUPIMMPS" form="zmm, zmm, zmm, imm8 {sae}" xed="VFIXUPIMMPS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_fixupimm_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <parameter type="__m512i" varname="c" etype="UI32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Fix up packed single-precision (32-bit) floating-point elements in "a" and "b" using packed 32-bit integers in "c", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). "imm8" is used to set the required flags reporting.</description>
+ <operation>enum TOKEN_TYPE {
+ QNAN_TOKEN := 0, \
+ SNAN_TOKEN := 1, \
+ ZERO_VALUE_TOKEN := 2, \
+ ONE_VALUE_TOKEN := 3, \
+ NEG_INF_TOKEN := 4, \
+ POS_INF_TOKEN := 5, \
+ NEG_VALUE_TOKEN := 6, \
+ POS_VALUE_TOKEN := 7
+}
+DEFINE FIXUPIMMPD(src1[31:0], src2[31:0], src3[31:0], imm8[7:0]) {
+ tsrc[31:0] := ((src2[30:23] == 0) AND (MXCSR.DAZ == 1)) ? 0.0 : src2[31:0]
+ CASE(tsrc[31:0]) OF
+ QNAN_TOKEN:j := 0
+ SNAN_TOKEN:j := 1
+ ZERO_VALUE_TOKEN: j := 2
+ ONE_VALUE_TOKEN: j := 3
+ NEG_INF_TOKEN: j := 4
+ POS_INF_TOKEN: j := 5
+ NEG_VALUE_TOKEN: j := 6
+ POS_VALUE_TOKEN: j := 7
+ ESAC
+
+ token_response[3:0] := src3[3+4*j:4*j]
+
+ CASE(token_response[3:0]) OF
+ 0 : dest[31:0] := src1[31:0]
+ 1 : dest[31:0] := tsrc[31:0]
+ 2 : dest[31:0] := QNaN(tsrc[31:0])
+ 3 : dest[31:0] := QNAN_Indefinite
+ 4 : dest[31:0] := -INF
+ 5 : dest[31:0] := +INF
+ 6 : dest[31:0] := tsrc.sign? -INF : +INF
+ 7 : dest[31:0] := -0
+ 8 : dest[31:0] := +0
+ 9 : dest[31:0] := -1
+ 10: dest[31:0] := +1
+ 11: dest[31:0] := 1/2
+ 12: dest[31:0] := 90.0
+ 13: dest[31:0] := PI/2
+ 14: dest[31:0] := MAX_FLOAT
+ 15: dest[31:0] := -MAX_FLOAT
+ ESAC
+
+ CASE(tsrc[31:0]) OF
+ ZERO_VALUE_TOKEN:
+ IF (imm8[0]) #ZE; FI
+ ZERO_VALUE_TOKEN:
+ IF (imm8[1]) #IE; FI
+ ONE_VALUE_TOKEN:
+ IF (imm8[2]) #ZE; FI
+ ONE_VALUE_TOKEN:
+ IF (imm8[3]) #IE; FI
+ SNAN_TOKEN:
+ IF (imm8[4]) #IE; FI
+ NEG_INF_TOKEN:
+ IF (imm8[5]) #IE; FI
+ NEG_VALUE_TOKEN:
+ IF (imm8[6]) #IE; FI
+ POS_INF_TOKEN:
+ IF (imm8[7]) #IE; FI
+ ESAC
+ RETURN dest[31:0]
+}
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := FIXUPIMMPD(a[i+31:i], b[i+31:i], c[i+31:i], imm8[7:0])
+ ELSE
+ dst[i+31:i] := a[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VFIXUPIMMPS" form="zmm {k}, zmm, zmm, imm8" xed="VFIXUPIMMPS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_fixupimm_round_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <parameter type="__m512i" varname="c" etype="UI32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Fix up packed single-precision (32-bit) floating-point elements in "a" and "b" using packed 32-bit integers in "c", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). "imm8" is used to set the required flags reporting.
+ [sae_note]</description>
+ <operation>enum TOKEN_TYPE {
+ QNAN_TOKEN := 0, \
+ SNAN_TOKEN := 1, \
+ ZERO_VALUE_TOKEN := 2, \
+ ONE_VALUE_TOKEN := 3, \
+ NEG_INF_TOKEN := 4, \
+ POS_INF_TOKEN := 5, \
+ NEG_VALUE_TOKEN := 6, \
+ POS_VALUE_TOKEN := 7
+}
+DEFINE FIXUPIMMPD(src1[31:0], src2[31:0], src3[31:0], imm8[7:0]) {
+ tsrc[31:0] := ((src2[30:23] == 0) AND (MXCSR.DAZ == 1)) ? 0.0 : src2[31:0]
+ CASE(tsrc[31:0]) OF
+ QNAN_TOKEN:j := 0
+ SNAN_TOKEN:j := 1
+ ZERO_VALUE_TOKEN: j := 2
+ ONE_VALUE_TOKEN: j := 3
+ NEG_INF_TOKEN: j := 4
+ POS_INF_TOKEN: j := 5
+ NEG_VALUE_TOKEN: j := 6
+ POS_VALUE_TOKEN: j := 7
+ ESAC
+
+ token_response[3:0] := src3[3+4*j:4*j]
+
+ CASE(token_response[3:0]) OF
+ 0 : dest[31:0] := src1[31:0]
+ 1 : dest[31:0] := tsrc[31:0]
+ 2 : dest[31:0] := QNaN(tsrc[31:0])
+ 3 : dest[31:0] := QNAN_Indefinite
+ 4 : dest[31:0] := -INF
+ 5 : dest[31:0] := +INF
+ 6 : dest[31:0] := tsrc.sign? -INF : +INF
+ 7 : dest[31:0] := -0
+ 8 : dest[31:0] := +0
+ 9 : dest[31:0] := -1
+ 10: dest[31:0] := +1
+ 11: dest[31:0] := 1/2
+ 12: dest[31:0] := 90.0
+ 13: dest[31:0] := PI/2
+ 14: dest[31:0] := MAX_FLOAT
+ 15: dest[31:0] := -MAX_FLOAT
+ ESAC
+
+ CASE(tsrc[31:0]) OF
+ ZERO_VALUE_TOKEN:
+ IF (imm8[0]) #ZE; FI
+ ZERO_VALUE_TOKEN:
+ IF (imm8[1]) #IE; FI
+ ONE_VALUE_TOKEN:
+ IF (imm8[2]) #ZE; FI
+ ONE_VALUE_TOKEN:
+ IF (imm8[3]) #IE; FI
+ SNAN_TOKEN:
+ IF (imm8[4]) #IE; FI
+ NEG_INF_TOKEN:
+ IF (imm8[5]) #IE; FI
+ NEG_VALUE_TOKEN:
+ IF (imm8[6]) #IE; FI
+ POS_INF_TOKEN:
+ IF (imm8[7]) #IE; FI
+ ESAC
+ RETURN dest[31:0]
+}
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := FIXUPIMMPD(a[i+31:i], b[i+31:i], c[i+31:i], imm8[7:0])
+ ELSE
+ dst[i+31:i] := a[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VFIXUPIMMPS" form="zmm {k}, zmm, zmm, imm8 {sae}" xed="VFIXUPIMMPS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_fixupimm_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <parameter type="__m512i" varname="c" etype="UI32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Fix up packed single-precision (32-bit) floating-point elements in "a" and "b" using packed 32-bit integers in "c", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). "imm8" is used to set the required flags reporting.</description>
+ <operation>enum TOKEN_TYPE {
+ QNAN_TOKEN := 0, \
+ SNAN_TOKEN := 1, \
+ ZERO_VALUE_TOKEN := 2, \
+ ONE_VALUE_TOKEN := 3, \
+ NEG_INF_TOKEN := 4, \
+ POS_INF_TOKEN := 5, \
+ NEG_VALUE_TOKEN := 6, \
+ POS_VALUE_TOKEN := 7
+}
+DEFINE FIXUPIMMPD(src1[31:0], src2[31:0], src3[31:0], imm8[7:0]) {
+ tsrc[31:0] := ((src2[30:23] == 0) AND (MXCSR.DAZ == 1)) ? 0.0 : src2[31:0]
+ CASE(tsrc[31:0]) OF
+ QNAN_TOKEN:j := 0
+ SNAN_TOKEN:j := 1
+ ZERO_VALUE_TOKEN: j := 2
+ ONE_VALUE_TOKEN: j := 3
+ NEG_INF_TOKEN: j := 4
+ POS_INF_TOKEN: j := 5
+ NEG_VALUE_TOKEN: j := 6
+ POS_VALUE_TOKEN: j := 7
+ ESAC
+
+ token_response[3:0] := src3[3+4*j:4*j]
+
+ CASE(token_response[3:0]) OF
+ 0 : dest[31:0] := src1[31:0]
+ 1 : dest[31:0] := tsrc[31:0]
+ 2 : dest[31:0] := QNaN(tsrc[31:0])
+ 3 : dest[31:0] := QNAN_Indefinite
+ 4 : dest[31:0] := -INF
+ 5 : dest[31:0] := +INF
+ 6 : dest[31:0] := tsrc.sign? -INF : +INF
+ 7 : dest[31:0] := -0
+ 8 : dest[31:0] := +0
+ 9 : dest[31:0] := -1
+ 10: dest[31:0] := +1
+ 11: dest[31:0] := 1/2
+ 12: dest[31:0] := 90.0
+ 13: dest[31:0] := PI/2
+ 14: dest[31:0] := MAX_FLOAT
+ 15: dest[31:0] := -MAX_FLOAT
+ ESAC
+
+ CASE(tsrc[31:0]) OF
+ ZERO_VALUE_TOKEN:
+ IF (imm8[0]) #ZE; FI
+ ZERO_VALUE_TOKEN:
+ IF (imm8[1]) #IE; FI
+ ONE_VALUE_TOKEN:
+ IF (imm8[2]) #ZE; FI
+ ONE_VALUE_TOKEN:
+ IF (imm8[3]) #IE; FI
+ SNAN_TOKEN:
+ IF (imm8[4]) #IE; FI
+ NEG_INF_TOKEN:
+ IF (imm8[5]) #IE; FI
+ NEG_VALUE_TOKEN:
+ IF (imm8[6]) #IE; FI
+ POS_INF_TOKEN:
+ IF (imm8[7]) #IE; FI
+ ESAC
+ RETURN dest[31:0]
+}
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := FIXUPIMMPD(a[i+31:i], b[i+31:i], c[i+31:i], imm8[7:0])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VFIXUPIMMPS" form="zmm {z}, zmm, zmm, imm8" xed="VFIXUPIMMPS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_fixupimm_round_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <parameter type="__m512i" varname="c" etype="UI32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Fix up packed single-precision (32-bit) floating-point elements in "a" and "b" using packed 32-bit integers in "c", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). "imm8" is used to set the required flags reporting.
+ [sae_note]</description>
+ <operation>enum TOKEN_TYPE {
+ QNAN_TOKEN := 0, \
+ SNAN_TOKEN := 1, \
+ ZERO_VALUE_TOKEN := 2, \
+ ONE_VALUE_TOKEN := 3, \
+ NEG_INF_TOKEN := 4, \
+ POS_INF_TOKEN := 5, \
+ NEG_VALUE_TOKEN := 6, \
+ POS_VALUE_TOKEN := 7
+}
+DEFINE FIXUPIMMPD(src1[31:0], src2[31:0], src3[31:0], imm8[7:0]) {
+ tsrc[31:0] := ((src2[30:23] == 0) AND (MXCSR.DAZ == 1)) ? 0.0 : src2[31:0]
+ CASE(tsrc[31:0]) OF
+ QNAN_TOKEN:j := 0
+ SNAN_TOKEN:j := 1
+ ZERO_VALUE_TOKEN: j := 2
+ ONE_VALUE_TOKEN: j := 3
+ NEG_INF_TOKEN: j := 4
+ POS_INF_TOKEN: j := 5
+ NEG_VALUE_TOKEN: j := 6
+ POS_VALUE_TOKEN: j := 7
+ ESAC
+
+ token_response[3:0] := src3[3+4*j:4*j]
+
+ CASE(token_response[3:0]) OF
+ 0 : dest[31:0] := src1[31:0]
+ 1 : dest[31:0] := tsrc[31:0]
+ 2 : dest[31:0] := QNaN(tsrc[31:0])
+ 3 : dest[31:0] := QNAN_Indefinite
+ 4 : dest[31:0] := -INF
+ 5 : dest[31:0] := +INF
+ 6 : dest[31:0] := tsrc.sign? -INF : +INF
+ 7 : dest[31:0] := -0
+ 8 : dest[31:0] := +0
+ 9 : dest[31:0] := -1
+ 10: dest[31:0] := +1
+ 11: dest[31:0] := 1/2
+ 12: dest[31:0] := 90.0
+ 13: dest[31:0] := PI/2
+ 14: dest[31:0] := MAX_FLOAT
+ 15: dest[31:0] := -MAX_FLOAT
+ ESAC
+
+ CASE(tsrc[31:0]) OF
+ ZERO_VALUE_TOKEN:
+ IF (imm8[0]) #ZE; FI
+ ZERO_VALUE_TOKEN:
+ IF (imm8[1]) #IE; FI
+ ONE_VALUE_TOKEN:
+ IF (imm8[2]) #ZE; FI
+ ONE_VALUE_TOKEN:
+ IF (imm8[3]) #IE; FI
+ SNAN_TOKEN:
+ IF (imm8[4]) #IE; FI
+ NEG_INF_TOKEN:
+ IF (imm8[5]) #IE; FI
+ NEG_VALUE_TOKEN:
+ IF (imm8[6]) #IE; FI
+ POS_INF_TOKEN:
+ IF (imm8[7]) #IE; FI
+ ESAC
+ RETURN dest[31:0]
+}
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := FIXUPIMMPD(a[i+31:i], b[i+31:i], c[i+31:i], imm8[7:0])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VFIXUPIMMPS" form="zmm {z}, zmm, zmm, imm8 {sae}" xed="VFIXUPIMMPS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_fixupimm_round_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="__m128i" varname="c" etype="UI64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Fix up the lower double-precision (64-bit) floating-point elements in "a" and "b" using the lower 64-bit integer in "c", store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst". "imm8" is used to set the required flags reporting.
+ [sae_note]</description>
+ <operation>enum TOKEN_TYPE {
+ QNAN_TOKEN := 0, \
+ SNAN_TOKEN := 1, \
+ ZERO_VALUE_TOKEN := 2, \
+ ONE_VALUE_TOKEN := 3, \
+ NEG_INF_TOKEN := 4, \
+ POS_INF_TOKEN := 5, \
+ NEG_VALUE_TOKEN := 6, \
+ POS_VALUE_TOKEN := 7
+}
+DEFINE FIXUPIMMPD(src1[63:0], src2[63:0], src3[63:0], imm8[7:0]) {
+ tsrc[63:0] := ((src2[62:52] == 0) AND (MXCSR.DAZ == 1)) ? 0.0 : src2[63:0]
+ CASE(tsrc[63:0]) OF
+ QNAN_TOKEN:j := 0
+ SNAN_TOKEN:j := 1
+ ZERO_VALUE_TOKEN: j := 2
+ ONE_VALUE_TOKEN: j := 3
+ NEG_INF_TOKEN: j := 4
+ POS_INF_TOKEN: j := 5
+ NEG_VALUE_TOKEN: j := 6
+ POS_VALUE_TOKEN: j := 7
+ ESAC
+
+ token_response[3:0] := src3[3+4*j:4*j]
+
+ CASE(token_response[3:0]) OF
+ 0 : dest[63:0] := src1[63:0]
+ 1 : dest[63:0] := tsrc[63:0]
+ 2 : dest[63:0] := QNaN(tsrc[63:0])
+ 3 : dest[63:0] := QNAN_Indefinite
+ 4 : dest[63:0] := -INF
+ 5 : dest[63:0] := +INF
+ 6 : dest[63:0] := tsrc.sign? -INF : +INF
+ 7 : dest[63:0] := -0
+ 8 : dest[63:0] := +0
+ 9 : dest[63:0] := -1
+ 10: dest[63:0] := +1
+ 11: dest[63:0] := 1/2
+ 12: dest[63:0] := 90.0
+ 13: dest[63:0] := PI/2
+ 14: dest[63:0] := MAX_FLOAT
+ 15: dest[63:0] := -MAX_FLOAT
+ ESAC
+
+ CASE(tsrc[31:0]) OF
+ ZERO_VALUE_TOKEN:
+ IF (imm8[0]) #ZE; FI
+ ZERO_VALUE_TOKEN:
+ IF (imm8[1]) #IE; FI
+ ONE_VALUE_TOKEN:
+ IF (imm8[2]) #ZE; FI
+ ONE_VALUE_TOKEN:
+ IF (imm8[3]) #IE; FI
+ SNAN_TOKEN:
+ IF (imm8[4]) #IE; FI
+ NEG_INF_TOKEN:
+ IF (imm8[5]) #IE; FI
+ NEG_VALUE_TOKEN:
+ IF (imm8[6]) #IE; FI
+ POS_INF_TOKEN:
+ IF (imm8[7]) #IE; FI
+ ESAC
+ RETURN dest[63:0]
+}
+dst[63:0] := FIXUPIMMPD(a[63:0], b[63:0], c[63:0], imm8[7:0])
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VFIXUPIMMSD" form="xmm, xmm, xmm, imm8 {sae}" xed="VFIXUPIMMSD_XMMf64_MASKmskw_XMMf64_XMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_fixupimm_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="__m128i" varname="c" etype="UI64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Fix up the lower double-precision (64-bit) floating-point elements in "a" and "b" using the lower 64-bit integer in "c", store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst". "imm8" is used to set the required flags reporting.</description>
+ <operation>enum TOKEN_TYPE {
+ QNAN_TOKEN := 0, \
+ SNAN_TOKEN := 1, \
+ ZERO_VALUE_TOKEN := 2, \
+ ONE_VALUE_TOKEN := 3, \
+ NEG_INF_TOKEN := 4, \
+ POS_INF_TOKEN := 5, \
+ NEG_VALUE_TOKEN := 6, \
+ POS_VALUE_TOKEN := 7
+}
+DEFINE FIXUPIMMPD(src1[63:0], src2[63:0], src3[63:0], imm8[7:0]) {
+ tsrc[63:0] := ((src2[62:52] == 0) AND (MXCSR.DAZ == 1)) ? 0.0 : src2[63:0]
+ CASE(tsrc[63:0]) OF
+ QNAN_TOKEN:j := 0
+ SNAN_TOKEN:j := 1
+ ZERO_VALUE_TOKEN: j := 2
+ ONE_VALUE_TOKEN: j := 3
+ NEG_INF_TOKEN: j := 4
+ POS_INF_TOKEN: j := 5
+ NEG_VALUE_TOKEN: j := 6
+ POS_VALUE_TOKEN: j := 7
+ ESAC
+
+ token_response[3:0] := src3[3+4*j:4*j]
+
+ CASE(token_response[3:0]) OF
+ 0 : dest[63:0] := src1[63:0]
+ 1 : dest[63:0] := tsrc[63:0]
+ 2 : dest[63:0] := QNaN(tsrc[63:0])
+ 3 : dest[63:0] := QNAN_Indefinite
+ 4 : dest[63:0] := -INF
+ 5 : dest[63:0] := +INF
+ 6 : dest[63:0] := tsrc.sign? -INF : +INF
+ 7 : dest[63:0] := -0
+ 8 : dest[63:0] := +0
+ 9 : dest[63:0] := -1
+ 10: dest[63:0] := +1
+ 11: dest[63:0] := 1/2
+ 12: dest[63:0] := 90.0
+ 13: dest[63:0] := PI/2
+ 14: dest[63:0] := MAX_FLOAT
+ 15: dest[63:0] := -MAX_FLOAT
+ ESAC
+
+ CASE(tsrc[31:0]) OF
+ ZERO_VALUE_TOKEN:
+ IF (imm8[0]) #ZE; FI
+ ZERO_VALUE_TOKEN:
+ IF (imm8[1]) #IE; FI
+ ONE_VALUE_TOKEN:
+ IF (imm8[2]) #ZE; FI
+ ONE_VALUE_TOKEN:
+ IF (imm8[3]) #IE; FI
+ SNAN_TOKEN:
+ IF (imm8[4]) #IE; FI
+ NEG_INF_TOKEN:
+ IF (imm8[5]) #IE; FI
+ NEG_VALUE_TOKEN:
+ IF (imm8[6]) #IE; FI
+ POS_INF_TOKEN:
+ IF (imm8[7]) #IE; FI
+ ESAC
+ RETURN dest[63:0]
+}
+dst[63:0] := FIXUPIMMPD(a[63:0], b[63:0], c[63:0], imm8[7:0])
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VFIXUPIMMSD" form="xmm, xmm, xmm, imm8" xed="VFIXUPIMMSD_XMMf64_MASKmskw_XMMf64_XMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_fixupimm_round_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="__m128i" varname="c" etype="UI64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Fix up the lower double-precision (64-bit) floating-point elements in "a" and "b" using the lower 64-bit integer in "c", store the result in the lower element of "dst" using writemask "k" (the element is copied from "a" when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". "imm8" is used to set the required flags reporting.
+ [sae_note]</description>
+ <operation>enum TOKEN_TYPE {
+ QNAN_TOKEN := 0, \
+ SNAN_TOKEN := 1, \
+ ZERO_VALUE_TOKEN := 2, \
+ ONE_VALUE_TOKEN := 3, \
+ NEG_INF_TOKEN := 4, \
+ POS_INF_TOKEN := 5, \
+ NEG_VALUE_TOKEN := 6, \
+ POS_VALUE_TOKEN := 7
+}
+DEFINE FIXUPIMMPD(src1[63:0], src2[63:0], src3[63:0], imm8[7:0]) {
+ tsrc[63:0] := ((src2[62:52] == 0) AND (MXCSR.DAZ == 1)) ? 0.0 : src2[63:0]
+ CASE(tsrc[63:0]) OF
+ QNAN_TOKEN:j := 0
+ SNAN_TOKEN:j := 1
+ ZERO_VALUE_TOKEN: j := 2
+ ONE_VALUE_TOKEN: j := 3
+ NEG_INF_TOKEN: j := 4
+ POS_INF_TOKEN: j := 5
+ NEG_VALUE_TOKEN: j := 6
+ POS_VALUE_TOKEN: j := 7
+ ESAC
+
+ token_response[3:0] := src3[3+4*j:4*j]
+
+ CASE(token_response[3:0]) OF
+ 0 : dest[63:0] := src1[63:0]
+ 1 : dest[63:0] := tsrc[63:0]
+ 2 : dest[63:0] := QNaN(tsrc[63:0])
+ 3 : dest[63:0] := QNAN_Indefinite
+ 4 : dest[63:0] := -INF
+ 5 : dest[63:0] := +INF
+ 6 : dest[63:0] := tsrc.sign? -INF : +INF
+ 7 : dest[63:0] := -0
+ 8 : dest[63:0] := +0
+ 9 : dest[63:0] := -1
+ 10: dest[63:0] := +1
+ 11: dest[63:0] := 1/2
+ 12: dest[63:0] := 90.0
+ 13: dest[63:0] := PI/2
+ 14: dest[63:0] := MAX_FLOAT
+ 15: dest[63:0] := -MAX_FLOAT
+ ESAC
+
+ CASE(tsrc[31:0]) OF
+ ZERO_VALUE_TOKEN:
+ IF (imm8[0]) #ZE; FI
+ ZERO_VALUE_TOKEN:
+ IF (imm8[1]) #IE; FI
+ ONE_VALUE_TOKEN:
+ IF (imm8[2]) #ZE; FI
+ ONE_VALUE_TOKEN:
+ IF (imm8[3]) #IE; FI
+ SNAN_TOKEN:
+ IF (imm8[4]) #IE; FI
+ NEG_INF_TOKEN:
+ IF (imm8[5]) #IE; FI
+ NEG_VALUE_TOKEN:
+ IF (imm8[6]) #IE; FI
+ POS_INF_TOKEN:
+ IF (imm8[7]) #IE; FI
+ ESAC
+ RETURN dest[63:0]
+}
+IF k[0]
+ dst[63:0] := FIXUPIMMPD(a[63:0], b[63:0], c[63:0], imm8[7:0])
+ELSE
+ dst[63:0] := a[63:0]
+FI
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VFIXUPIMMSD" form="xmm {k}, xmm, xmm, imm8 {sae}" xed="VFIXUPIMMSD_XMMf64_MASKmskw_XMMf64_XMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_fixupimm_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="__m128i" varname="c" etype="UI64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Fix up the lower double-precision (64-bit) floating-point elements in "a" and "b" using the lower 64-bit integer in "c", store the result in the lower element of "dst" using writemask "k" (the element is copied from "a" when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". "imm8" is used to set the required flags reporting.</description>
+ <operation>enum TOKEN_TYPE {
+ QNAN_TOKEN := 0, \
+ SNAN_TOKEN := 1, \
+ ZERO_VALUE_TOKEN := 2, \
+ ONE_VALUE_TOKEN := 3, \
+ NEG_INF_TOKEN := 4, \
+ POS_INF_TOKEN := 5, \
+ NEG_VALUE_TOKEN := 6, \
+ POS_VALUE_TOKEN := 7
+}
+DEFINE FIXUPIMMPD(src1[63:0], src2[63:0], src3[63:0], imm8[7:0]) {
+ tsrc[63:0] := ((src2[62:52] == 0) AND (MXCSR.DAZ == 1)) ? 0.0 : src2[63:0]
+ CASE(tsrc[63:0]) OF
+ QNAN_TOKEN:j := 0
+ SNAN_TOKEN:j := 1
+ ZERO_VALUE_TOKEN: j := 2
+ ONE_VALUE_TOKEN: j := 3
+ NEG_INF_TOKEN: j := 4
+ POS_INF_TOKEN: j := 5
+ NEG_VALUE_TOKEN: j := 6
+ POS_VALUE_TOKEN: j := 7
+ ESAC
+
+ token_response[3:0] := src3[3+4*j:4*j]
+
+ CASE(token_response[3:0]) OF
+ 0 : dest[63:0] := src1[63:0]
+ 1 : dest[63:0] := tsrc[63:0]
+ 2 : dest[63:0] := QNaN(tsrc[63:0])
+ 3 : dest[63:0] := QNAN_Indefinite
+ 4 : dest[63:0] := -INF
+ 5 : dest[63:0] := +INF
+ 6 : dest[63:0] := tsrc.sign? -INF : +INF
+ 7 : dest[63:0] := -0
+ 8 : dest[63:0] := +0
+ 9 : dest[63:0] := -1
+ 10: dest[63:0] := +1
+ 11: dest[63:0] := 1/2
+ 12: dest[63:0] := 90.0
+ 13: dest[63:0] := PI/2
+ 14: dest[63:0] := MAX_FLOAT
+ 15: dest[63:0] := -MAX_FLOAT
+ ESAC
+
+ CASE(tsrc[31:0]) OF
+ ZERO_VALUE_TOKEN:
+ IF (imm8[0]) #ZE; FI
+ ZERO_VALUE_TOKEN:
+ IF (imm8[1]) #IE; FI
+ ONE_VALUE_TOKEN:
+ IF (imm8[2]) #ZE; FI
+ ONE_VALUE_TOKEN:
+ IF (imm8[3]) #IE; FI
+ SNAN_TOKEN:
+ IF (imm8[4]) #IE; FI
+ NEG_INF_TOKEN:
+ IF (imm8[5]) #IE; FI
+ NEG_VALUE_TOKEN:
+ IF (imm8[6]) #IE; FI
+ POS_INF_TOKEN:
+ IF (imm8[7]) #IE; FI
+ ESAC
+ RETURN dest[63:0]
+}
+IF k[0]
+ dst[63:0] := FIXUPIMMPD(a[63:0], b[63:0], c[63:0], imm8[7:0])
+ELSE
+ dst[63:0] := a[63:0]
+FI
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VFIXUPIMMSD" form="xmm {k}, xmm, xmm, imm8" xed="VFIXUPIMMSD_XMMf64_MASKmskw_XMMf64_XMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_fixupimm_round_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="__m128i" varname="c" etype="UI64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Fix up the lower double-precision (64-bit) floating-point elements in "a" and "b" using the lower 64-bit integer in "c", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". "imm8" is used to set the required flags reporting.
+ [sae_note]</description>
+ <operation>enum TOKEN_TYPE {
+ QNAN_TOKEN := 0, \
+ SNAN_TOKEN := 1, \
+ ZERO_VALUE_TOKEN := 2, \
+ ONE_VALUE_TOKEN := 3, \
+ NEG_INF_TOKEN := 4, \
+ POS_INF_TOKEN := 5, \
+ NEG_VALUE_TOKEN := 6, \
+ POS_VALUE_TOKEN := 7
+}
+DEFINE FIXUPIMMPD(src1[63:0], src2[63:0], src3[63:0], imm8[7:0]) {
+ tsrc[63:0] := ((src2[62:52] == 0) AND (MXCSR.DAZ == 1)) ? 0.0 : src2[63:0]
+ CASE(tsrc[63:0]) OF
+ QNAN_TOKEN:j := 0
+ SNAN_TOKEN:j := 1
+ ZERO_VALUE_TOKEN: j := 2
+ ONE_VALUE_TOKEN: j := 3
+ NEG_INF_TOKEN: j := 4
+ POS_INF_TOKEN: j := 5
+ NEG_VALUE_TOKEN: j := 6
+ POS_VALUE_TOKEN: j := 7
+ ESAC
+
+ token_response[3:0] := src3[3+4*j:4*j]
+
+ CASE(token_response[3:0]) OF
+ 0 : dest[63:0] := src1[63:0]
+ 1 : dest[63:0] := tsrc[63:0]
+ 2 : dest[63:0] := QNaN(tsrc[63:0])
+ 3 : dest[63:0] := QNAN_Indefinite
+ 4 : dest[63:0] := -INF
+ 5 : dest[63:0] := +INF
+ 6 : dest[63:0] := tsrc.sign? -INF : +INF
+ 7 : dest[63:0] := -0
+ 8 : dest[63:0] := +0
+ 9 : dest[63:0] := -1
+ 10: dest[63:0] := +1
+ 11: dest[63:0] := 1/2
+ 12: dest[63:0] := 90.0
+ 13: dest[63:0] := PI/2
+ 14: dest[63:0] := MAX_FLOAT
+ 15: dest[63:0] := -MAX_FLOAT
+ ESAC
+
+ CASE(tsrc[31:0]) OF
+ ZERO_VALUE_TOKEN:
+ IF (imm8[0]) #ZE; FI
+ ZERO_VALUE_TOKEN:
+ IF (imm8[1]) #IE; FI
+ ONE_VALUE_TOKEN:
+ IF (imm8[2]) #ZE; FI
+ ONE_VALUE_TOKEN:
+ IF (imm8[3]) #IE; FI
+ SNAN_TOKEN:
+ IF (imm8[4]) #IE; FI
+ NEG_INF_TOKEN:
+ IF (imm8[5]) #IE; FI
+ NEG_VALUE_TOKEN:
+ IF (imm8[6]) #IE; FI
+ POS_INF_TOKEN:
+ IF (imm8[7]) #IE; FI
+ ESAC
+ RETURN dest[63:0]
+}
+IF k[0]
+ dst[63:0] := FIXUPIMMPD(a[63:0], b[63:0], c[63:0], imm8[7:0])
+ELSE
+ dst[63:0] := 0
+FI
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VFIXUPIMMSD" form="xmm {z}, xmm, xmm, imm8 {sae}" xed="VFIXUPIMMSD_XMMf64_MASKmskw_XMMf64_XMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_fixupimm_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="__m128i" varname="c" etype="UI64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Fix up the lower double-precision (64-bit) floating-point elements in "a" and "b" using the lower 64-bit integer in "c", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". "imm8" is used to set the required flags reporting.</description>
+ <operation>enum TOKEN_TYPE {
+ QNAN_TOKEN := 0, \
+ SNAN_TOKEN := 1, \
+ ZERO_VALUE_TOKEN := 2, \
+ ONE_VALUE_TOKEN := 3, \
+ NEG_INF_TOKEN := 4, \
+ POS_INF_TOKEN := 5, \
+ NEG_VALUE_TOKEN := 6, \
+ POS_VALUE_TOKEN := 7
+}
+DEFINE FIXUPIMMPD(src1[63:0], src2[63:0], src3[63:0], imm8[7:0]) {
+ tsrc[63:0] := ((src2[62:52] == 0) AND (MXCSR.DAZ == 1)) ? 0.0 : src2[63:0]
+ CASE(tsrc[63:0]) OF
+ QNAN_TOKEN:j := 0
+ SNAN_TOKEN:j := 1
+ ZERO_VALUE_TOKEN: j := 2
+ ONE_VALUE_TOKEN: j := 3
+ NEG_INF_TOKEN: j := 4
+ POS_INF_TOKEN: j := 5
+ NEG_VALUE_TOKEN: j := 6
+ POS_VALUE_TOKEN: j := 7
+ ESAC
+
+ token_response[3:0] := src3[3+4*j:4*j]
+
+ CASE(token_response[3:0]) OF
+ 0 : dest[63:0] := src1[63:0]
+ 1 : dest[63:0] := tsrc[63:0]
+ 2 : dest[63:0] := QNaN(tsrc[63:0])
+ 3 : dest[63:0] := QNAN_Indefinite
+ 4 : dest[63:0] := -INF
+ 5 : dest[63:0] := +INF
+ 6 : dest[63:0] := tsrc.sign? -INF : +INF
+ 7 : dest[63:0] := -0
+ 8 : dest[63:0] := +0
+ 9 : dest[63:0] := -1
+ 10: dest[63:0] := +1
+ 11: dest[63:0] := 1/2
+ 12: dest[63:0] := 90.0
+ 13: dest[63:0] := PI/2
+ 14: dest[63:0] := MAX_FLOAT
+ 15: dest[63:0] := -MAX_FLOAT
+ ESAC
+
+    CASE(tsrc[63:0]) OF
+ ZERO_VALUE_TOKEN:
+ IF (imm8[0]) #ZE; FI
+ ZERO_VALUE_TOKEN:
+ IF (imm8[1]) #IE; FI
+ ONE_VALUE_TOKEN:
+ IF (imm8[2]) #ZE; FI
+ ONE_VALUE_TOKEN:
+ IF (imm8[3]) #IE; FI
+ SNAN_TOKEN:
+ IF (imm8[4]) #IE; FI
+ NEG_INF_TOKEN:
+ IF (imm8[5]) #IE; FI
+ NEG_VALUE_TOKEN:
+ IF (imm8[6]) #IE; FI
+ POS_INF_TOKEN:
+ IF (imm8[7]) #IE; FI
+ ESAC
+ RETURN dest[63:0]
+}
+IF k[0]
+ dst[63:0] := FIXUPIMMPD(a[63:0], b[63:0], c[63:0], imm8[7:0])
+ELSE
+ dst[63:0] := 0
+FI
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VFIXUPIMMSD" form="xmm {z}, xmm, xmm, imm8" xed="VFIXUPIMMSD_XMMf64_MASKmskw_XMMf64_XMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_fixupimm_round_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="__m128i" varname="c" etype="UI32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Fix up the lower single-precision (32-bit) floating-point elements in "a" and "b" using the lower 32-bit integer in "c", store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". "imm8" is used to set the required flags reporting.
+ [sae_note]</description>
+ <operation>enum TOKEN_TYPE {
+ QNAN_TOKEN := 0, \
+ SNAN_TOKEN := 1, \
+ ZERO_VALUE_TOKEN := 2, \
+ ONE_VALUE_TOKEN := 3, \
+ NEG_INF_TOKEN := 4, \
+ POS_INF_TOKEN := 5, \
+ NEG_VALUE_TOKEN := 6, \
+ POS_VALUE_TOKEN := 7
+}
+DEFINE FIXUPIMMPD(src1[31:0], src2[31:0], src3[31:0], imm8[7:0]) {
+ tsrc[31:0] := ((src2[30:23] == 0) AND (MXCSR.DAZ == 1)) ? 0.0 : src2[31:0]
+ CASE(tsrc[31:0]) OF
+ QNAN_TOKEN:j := 0
+ SNAN_TOKEN:j := 1
+ ZERO_VALUE_TOKEN: j := 2
+ ONE_VALUE_TOKEN: j := 3
+ NEG_INF_TOKEN: j := 4
+ POS_INF_TOKEN: j := 5
+ NEG_VALUE_TOKEN: j := 6
+ POS_VALUE_TOKEN: j := 7
+ ESAC
+
+ token_response[3:0] := src3[3+4*j:4*j]
+
+ CASE(token_response[3:0]) OF
+ 0 : dest[31:0] := src1[31:0]
+ 1 : dest[31:0] := tsrc[31:0]
+ 2 : dest[31:0] := QNaN(tsrc[31:0])
+ 3 : dest[31:0] := QNAN_Indefinite
+ 4 : dest[31:0] := -INF
+ 5 : dest[31:0] := +INF
+ 6 : dest[31:0] := tsrc.sign? -INF : +INF
+ 7 : dest[31:0] := -0
+ 8 : dest[31:0] := +0
+ 9 : dest[31:0] := -1
+ 10: dest[31:0] := +1
+ 11: dest[31:0] := 1/2
+ 12: dest[31:0] := 90.0
+ 13: dest[31:0] := PI/2
+ 14: dest[31:0] := MAX_FLOAT
+ 15: dest[31:0] := -MAX_FLOAT
+ ESAC
+
+ CASE(tsrc[31:0]) OF
+ ZERO_VALUE_TOKEN:
+ IF (imm8[0]) #ZE; FI
+ ZERO_VALUE_TOKEN:
+ IF (imm8[1]) #IE; FI
+ ONE_VALUE_TOKEN:
+ IF (imm8[2]) #ZE; FI
+ ONE_VALUE_TOKEN:
+ IF (imm8[3]) #IE; FI
+ SNAN_TOKEN:
+ IF (imm8[4]) #IE; FI
+ NEG_INF_TOKEN:
+ IF (imm8[5]) #IE; FI
+ NEG_VALUE_TOKEN:
+ IF (imm8[6]) #IE; FI
+ POS_INF_TOKEN:
+ IF (imm8[7]) #IE; FI
+ ESAC
+ RETURN dest[31:0]
+}
+dst[31:0] := FIXUPIMMPD(a[31:0], b[31:0], c[31:0], imm8[7:0])
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VFIXUPIMMSS" form="xmm, xmm, xmm, imm8 {sae}" xed="VFIXUPIMMSS_XMMf32_MASKmskw_XMMf32_XMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_fixupimm_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="__m128i" varname="c" etype="UI32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Fix up the lower single-precision (32-bit) floating-point elements in "a" and "b" using the lower 32-bit integer in "c", store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". "imm8" is used to set the required flags reporting.</description>
+ <operation>enum TOKEN_TYPE {
+ QNAN_TOKEN := 0, \
+ SNAN_TOKEN := 1, \
+ ZERO_VALUE_TOKEN := 2, \
+ ONE_VALUE_TOKEN := 3, \
+ NEG_INF_TOKEN := 4, \
+ POS_INF_TOKEN := 5, \
+ NEG_VALUE_TOKEN := 6, \
+ POS_VALUE_TOKEN := 7
+}
+DEFINE FIXUPIMMPD(src1[31:0], src2[31:0], src3[31:0], imm8[7:0]) {
+ tsrc[31:0] := ((src2[30:23] == 0) AND (MXCSR.DAZ == 1)) ? 0.0 : src2[31:0]
+ CASE(tsrc[31:0]) OF
+ QNAN_TOKEN:j := 0
+ SNAN_TOKEN:j := 1
+ ZERO_VALUE_TOKEN: j := 2
+ ONE_VALUE_TOKEN: j := 3
+ NEG_INF_TOKEN: j := 4
+ POS_INF_TOKEN: j := 5
+ NEG_VALUE_TOKEN: j := 6
+ POS_VALUE_TOKEN: j := 7
+ ESAC
+
+ token_response[3:0] := src3[3+4*j:4*j]
+
+ CASE(token_response[3:0]) OF
+ 0 : dest[31:0] := src1[31:0]
+ 1 : dest[31:0] := tsrc[31:0]
+ 2 : dest[31:0] := QNaN(tsrc[31:0])
+ 3 : dest[31:0] := QNAN_Indefinite
+ 4 : dest[31:0] := -INF
+ 5 : dest[31:0] := +INF
+ 6 : dest[31:0] := tsrc.sign? -INF : +INF
+ 7 : dest[31:0] := -0
+ 8 : dest[31:0] := +0
+ 9 : dest[31:0] := -1
+ 10: dest[31:0] := +1
+ 11: dest[31:0] := 1/2
+ 12: dest[31:0] := 90.0
+ 13: dest[31:0] := PI/2
+ 14: dest[31:0] := MAX_FLOAT
+ 15: dest[31:0] := -MAX_FLOAT
+ ESAC
+
+ CASE(tsrc[31:0]) OF
+ ZERO_VALUE_TOKEN:
+ IF (imm8[0]) #ZE; FI
+ ZERO_VALUE_TOKEN:
+ IF (imm8[1]) #IE; FI
+ ONE_VALUE_TOKEN:
+ IF (imm8[2]) #ZE; FI
+ ONE_VALUE_TOKEN:
+ IF (imm8[3]) #IE; FI
+ SNAN_TOKEN:
+ IF (imm8[4]) #IE; FI
+ NEG_INF_TOKEN:
+ IF (imm8[5]) #IE; FI
+ NEG_VALUE_TOKEN:
+ IF (imm8[6]) #IE; FI
+ POS_INF_TOKEN:
+ IF (imm8[7]) #IE; FI
+ ESAC
+ RETURN dest[31:0]
+}
+dst[31:0] := FIXUPIMMPD(a[31:0], b[31:0], c[31:0], imm8[7:0])
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VFIXUPIMMSS" form="xmm, xmm, xmm, imm8" xed="VFIXUPIMMSS_XMMf32_MASKmskw_XMMf32_XMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_fixupimm_round_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="__m128i" varname="c" etype="UI32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Fix up the lower single-precision (32-bit) floating-point elements in "a" and "b" using the lower 32-bit integer in "c", store the result in the lower element of "dst" using writemask "k" (the element is copied from "a" when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". "imm8" is used to set the required flags reporting.
+ [sae_note]</description>
+ <operation>enum TOKEN_TYPE {
+ QNAN_TOKEN := 0, \
+ SNAN_TOKEN := 1, \
+ ZERO_VALUE_TOKEN := 2, \
+ ONE_VALUE_TOKEN := 3, \
+ NEG_INF_TOKEN := 4, \
+ POS_INF_TOKEN := 5, \
+ NEG_VALUE_TOKEN := 6, \
+ POS_VALUE_TOKEN := 7
+}
+DEFINE FIXUPIMMPD(src1[31:0], src2[31:0], src3[31:0], imm8[7:0]) {
+ tsrc[31:0] := ((src2[30:23] == 0) AND (MXCSR.DAZ == 1)) ? 0.0 : src2[31:0]
+ CASE(tsrc[31:0]) OF
+ QNAN_TOKEN:j := 0
+ SNAN_TOKEN:j := 1
+ ZERO_VALUE_TOKEN: j := 2
+ ONE_VALUE_TOKEN: j := 3
+ NEG_INF_TOKEN: j := 4
+ POS_INF_TOKEN: j := 5
+ NEG_VALUE_TOKEN: j := 6
+ POS_VALUE_TOKEN: j := 7
+ ESAC
+
+ token_response[3:0] := src3[3+4*j:4*j]
+
+ CASE(token_response[3:0]) OF
+ 0 : dest[31:0] := src1[31:0]
+ 1 : dest[31:0] := tsrc[31:0]
+ 2 : dest[31:0] := QNaN(tsrc[31:0])
+ 3 : dest[31:0] := QNAN_Indefinite
+ 4 : dest[31:0] := -INF
+ 5 : dest[31:0] := +INF
+ 6 : dest[31:0] := tsrc.sign? -INF : +INF
+ 7 : dest[31:0] := -0
+ 8 : dest[31:0] := +0
+ 9 : dest[31:0] := -1
+ 10: dest[31:0] := +1
+ 11: dest[31:0] := 1/2
+ 12: dest[31:0] := 90.0
+ 13: dest[31:0] := PI/2
+ 14: dest[31:0] := MAX_FLOAT
+ 15: dest[31:0] := -MAX_FLOAT
+ ESAC
+
+ CASE(tsrc[31:0]) OF
+ ZERO_VALUE_TOKEN:
+ IF (imm8[0]) #ZE; FI
+ ZERO_VALUE_TOKEN:
+ IF (imm8[1]) #IE; FI
+ ONE_VALUE_TOKEN:
+ IF (imm8[2]) #ZE; FI
+ ONE_VALUE_TOKEN:
+ IF (imm8[3]) #IE; FI
+ SNAN_TOKEN:
+ IF (imm8[4]) #IE; FI
+ NEG_INF_TOKEN:
+ IF (imm8[5]) #IE; FI
+ NEG_VALUE_TOKEN:
+ IF (imm8[6]) #IE; FI
+ POS_INF_TOKEN:
+ IF (imm8[7]) #IE; FI
+ ESAC
+ RETURN dest[31:0]
+}
+IF k[0]
+ dst[31:0] := FIXUPIMMPD(a[31:0], b[31:0], c[31:0], imm8[7:0])
+ELSE
+ dst[31:0] := a[31:0]
+FI
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VFIXUPIMMSS" form="xmm {k}, xmm, xmm, imm8 {sae}" xed="VFIXUPIMMSS_XMMf32_MASKmskw_XMMf32_XMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_fixupimm_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="__m128i" varname="c" etype="UI32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Fix up the lower single-precision (32-bit) floating-point elements in "a" and "b" using the lower 32-bit integer in "c", store the result in the lower element of "dst" using writemask "k" (the element is copied from "a" when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". "imm8" is used to set the required flags reporting.</description>
+ <operation>enum TOKEN_TYPE {
+ QNAN_TOKEN := 0, \
+ SNAN_TOKEN := 1, \
+ ZERO_VALUE_TOKEN := 2, \
+ ONE_VALUE_TOKEN := 3, \
+ NEG_INF_TOKEN := 4, \
+ POS_INF_TOKEN := 5, \
+ NEG_VALUE_TOKEN := 6, \
+ POS_VALUE_TOKEN := 7
+}
+DEFINE FIXUPIMMPD(src1[31:0], src2[31:0], src3[31:0], imm8[7:0]) {
+ tsrc[31:0] := ((src2[30:23] == 0) AND (MXCSR.DAZ == 1)) ? 0.0 : src2[31:0]
+ CASE(tsrc[31:0]) OF
+ QNAN_TOKEN:j := 0
+ SNAN_TOKEN:j := 1
+ ZERO_VALUE_TOKEN: j := 2
+ ONE_VALUE_TOKEN: j := 3
+ NEG_INF_TOKEN: j := 4
+ POS_INF_TOKEN: j := 5
+ NEG_VALUE_TOKEN: j := 6
+ POS_VALUE_TOKEN: j := 7
+ ESAC
+
+ token_response[3:0] := src3[3+4*j:4*j]
+
+ CASE(token_response[3:0]) OF
+ 0 : dest[31:0] := src1[31:0]
+ 1 : dest[31:0] := tsrc[31:0]
+ 2 : dest[31:0] := QNaN(tsrc[31:0])
+ 3 : dest[31:0] := QNAN_Indefinite
+ 4 : dest[31:0] := -INF
+ 5 : dest[31:0] := +INF
+ 6 : dest[31:0] := tsrc.sign? -INF : +INF
+ 7 : dest[31:0] := -0
+ 8 : dest[31:0] := +0
+ 9 : dest[31:0] := -1
+ 10: dest[31:0] := +1
+ 11: dest[31:0] := 1/2
+ 12: dest[31:0] := 90.0
+ 13: dest[31:0] := PI/2
+ 14: dest[31:0] := MAX_FLOAT
+ 15: dest[31:0] := -MAX_FLOAT
+ ESAC
+
+ CASE(tsrc[31:0]) OF
+ ZERO_VALUE_TOKEN:
+ IF (imm8[0]) #ZE; FI
+ ZERO_VALUE_TOKEN:
+ IF (imm8[1]) #IE; FI
+ ONE_VALUE_TOKEN:
+ IF (imm8[2]) #ZE; FI
+ ONE_VALUE_TOKEN:
+ IF (imm8[3]) #IE; FI
+ SNAN_TOKEN:
+ IF (imm8[4]) #IE; FI
+ NEG_INF_TOKEN:
+ IF (imm8[5]) #IE; FI
+ NEG_VALUE_TOKEN:
+ IF (imm8[6]) #IE; FI
+ POS_INF_TOKEN:
+ IF (imm8[7]) #IE; FI
+ ESAC
+ RETURN dest[31:0]
+}
+IF k[0]
+ dst[31:0] := FIXUPIMMPD(a[31:0], b[31:0], c[31:0], imm8[7:0])
+ELSE
+ dst[31:0] := a[31:0]
+FI
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VFIXUPIMMSS" form="xmm {k}, xmm, xmm, imm8" xed="VFIXUPIMMSS_XMMf32_MASKmskw_XMMf32_XMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_fixupimm_round_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="__m128i" varname="c" etype="UI32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Fix up the lower single-precision (32-bit) floating-point elements in "a" and "b" using the lower 32-bit integer in "c", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". "imm8" is used to set the required flags reporting.
+ [sae_note]</description>
+ <operation>enum TOKEN_TYPE {
+ QNAN_TOKEN := 0, \
+ SNAN_TOKEN := 1, \
+ ZERO_VALUE_TOKEN := 2, \
+ ONE_VALUE_TOKEN := 3, \
+ NEG_INF_TOKEN := 4, \
+ POS_INF_TOKEN := 5, \
+ NEG_VALUE_TOKEN := 6, \
+ POS_VALUE_TOKEN := 7
+}
+DEFINE FIXUPIMMPD(src1[31:0], src2[31:0], src3[31:0], imm8[7:0]) {
+ tsrc[31:0] := ((src2[30:23] == 0) AND (MXCSR.DAZ == 1)) ? 0.0 : src2[31:0]
+ CASE(tsrc[31:0]) OF
+ QNAN_TOKEN:j := 0
+ SNAN_TOKEN:j := 1
+ ZERO_VALUE_TOKEN: j := 2
+ ONE_VALUE_TOKEN: j := 3
+ NEG_INF_TOKEN: j := 4
+ POS_INF_TOKEN: j := 5
+ NEG_VALUE_TOKEN: j := 6
+ POS_VALUE_TOKEN: j := 7
+ ESAC
+
+ token_response[3:0] := src3[3+4*j:4*j]
+
+ CASE(token_response[3:0]) OF
+ 0 : dest[31:0] := src1[31:0]
+ 1 : dest[31:0] := tsrc[31:0]
+ 2 : dest[31:0] := QNaN(tsrc[31:0])
+ 3 : dest[31:0] := QNAN_Indefinite
+ 4 : dest[31:0] := -INF
+ 5 : dest[31:0] := +INF
+ 6 : dest[31:0] := tsrc.sign? -INF : +INF
+ 7 : dest[31:0] := -0
+ 8 : dest[31:0] := +0
+ 9 : dest[31:0] := -1
+ 10: dest[31:0] := +1
+ 11: dest[31:0] := 1/2
+ 12: dest[31:0] := 90.0
+ 13: dest[31:0] := PI/2
+ 14: dest[31:0] := MAX_FLOAT
+ 15: dest[31:0] := -MAX_FLOAT
+ ESAC
+
+ CASE(tsrc[31:0]) OF
+ ZERO_VALUE_TOKEN:
+ IF (imm8[0]) #ZE; FI
+ ZERO_VALUE_TOKEN:
+ IF (imm8[1]) #IE; FI
+ ONE_VALUE_TOKEN:
+ IF (imm8[2]) #ZE; FI
+ ONE_VALUE_TOKEN:
+ IF (imm8[3]) #IE; FI
+ SNAN_TOKEN:
+ IF (imm8[4]) #IE; FI
+ NEG_INF_TOKEN:
+ IF (imm8[5]) #IE; FI
+ NEG_VALUE_TOKEN:
+ IF (imm8[6]) #IE; FI
+ POS_INF_TOKEN:
+ IF (imm8[7]) #IE; FI
+ ESAC
+ RETURN dest[31:0]
+}
+IF k[0]
+ dst[31:0] := FIXUPIMMPD(a[31:0], b[31:0], c[31:0], imm8[7:0])
+ELSE
+ dst[31:0] := 0
+FI
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VFIXUPIMMSS" form="xmm {z}, xmm, xmm, imm8 {sae}" xed="VFIXUPIMMSS_XMMf32_MASKmskw_XMMf32_XMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_fixupimm_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="__m128i" varname="c" etype="UI32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Fix up the lower single-precision (32-bit) floating-point elements in "a" and "b" using the lower 32-bit integer in "c", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". "imm8" is used to set the required flags reporting.</description>
+ <operation>enum TOKEN_TYPE {
+ QNAN_TOKEN := 0, \
+ SNAN_TOKEN := 1, \
+ ZERO_VALUE_TOKEN := 2, \
+ ONE_VALUE_TOKEN := 3, \
+ NEG_INF_TOKEN := 4, \
+ POS_INF_TOKEN := 5, \
+ NEG_VALUE_TOKEN := 6, \
+ POS_VALUE_TOKEN := 7
+}
+DEFINE FIXUPIMMPD(src1[31:0], src2[31:0], src3[31:0], imm8[7:0]) {
+ tsrc[31:0] := ((src2[30:23] == 0) AND (MXCSR.DAZ == 1)) ? 0.0 : src2[31:0]
+ CASE(tsrc[31:0]) OF
+ QNAN_TOKEN:j := 0
+ SNAN_TOKEN:j := 1
+ ZERO_VALUE_TOKEN: j := 2
+ ONE_VALUE_TOKEN: j := 3
+ NEG_INF_TOKEN: j := 4
+ POS_INF_TOKEN: j := 5
+ NEG_VALUE_TOKEN: j := 6
+ POS_VALUE_TOKEN: j := 7
+ ESAC
+
+ token_response[3:0] := src3[3+4*j:4*j]
+
+ CASE(token_response[3:0]) OF
+ 0 : dest[31:0] := src1[31:0]
+ 1 : dest[31:0] := tsrc[31:0]
+ 2 : dest[31:0] := QNaN(tsrc[31:0])
+ 3 : dest[31:0] := QNAN_Indefinite
+ 4 : dest[31:0] := -INF
+ 5 : dest[31:0] := +INF
+ 6 : dest[31:0] := tsrc.sign? -INF : +INF
+ 7 : dest[31:0] := -0
+ 8 : dest[31:0] := +0
+ 9 : dest[31:0] := -1
+ 10: dest[31:0] := +1
+ 11: dest[31:0] := 1/2
+ 12: dest[31:0] := 90.0
+ 13: dest[31:0] := PI/2
+ 14: dest[31:0] := MAX_FLOAT
+ 15: dest[31:0] := -MAX_FLOAT
+ ESAC
+
+ CASE(tsrc[31:0]) OF
+ ZERO_VALUE_TOKEN:
+ IF (imm8[0]) #ZE; FI
+ ZERO_VALUE_TOKEN:
+ IF (imm8[1]) #IE; FI
+ ONE_VALUE_TOKEN:
+ IF (imm8[2]) #ZE; FI
+ ONE_VALUE_TOKEN:
+ IF (imm8[3]) #IE; FI
+ SNAN_TOKEN:
+ IF (imm8[4]) #IE; FI
+ NEG_INF_TOKEN:
+ IF (imm8[5]) #IE; FI
+ NEG_VALUE_TOKEN:
+ IF (imm8[6]) #IE; FI
+ POS_INF_TOKEN:
+ IF (imm8[7]) #IE; FI
+ ESAC
+ RETURN dest[31:0]
+}
+IF k[0]
+ dst[31:0] := FIXUPIMMPD(a[31:0], b[31:0], c[31:0], imm8[7:0])
+ELSE
+ dst[31:0] := 0
+FI
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VFIXUPIMMSS" form="xmm {z}, xmm, xmm, imm8" xed="VFIXUPIMMSS_XMMf32_MASKmskw_XMMf32_XMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_fmadd_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <parameter type="__m512d" varname="c" etype="FP64"/>
+ <description>Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", add the intermediate result to packed elements in "c", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VFMADD132PD" form="zmm {z}, zmm, zmm" xed="VFMADD132PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <instruction name="VFMADD213PD" form="zmm {z}, zmm, zmm" xed="VFMADD213PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <instruction name="VFMADD231PD" form="zmm {z}, zmm, zmm" xed="VFMADD231PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_fmadd_round_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <parameter type="__m512d" varname="c" etype="FP64"/>
+ <parameter type="const int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", add the intermediate result to packed elements in "c", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+ [round_note]</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VFMADD132PD" form="zmm {z}, zmm, zmm {er}" xed="VFMADD132PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <instruction name="VFMADD213PD" form="zmm {z}, zmm, zmm {er}" xed="VFMADD213PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <instruction name="VFMADD231PD" form="zmm {z}, zmm, zmm {er}" xed="VFMADD231PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_fmadd_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <parameter type="__m512" varname="c" etype="FP32"/>
+ <description>Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", add the intermediate result to packed elements in "c", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VFMADD132PS" form="zmm {z}, zmm, zmm" xed="VFMADD132PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <instruction name="VFMADD213PS" form="zmm {z}, zmm, zmm" xed="VFMADD213PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <instruction name="VFMADD231PS" form="zmm {z}, zmm, zmm" xed="VFMADD231PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_fmadd_round_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <parameter type="__m512" varname="c" etype="FP32"/>
+ <parameter type="const int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+	<description>Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", add the intermediate result to packed elements in "c", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). 
+ [round_note]</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VFMADD132PS" form="zmm {z}, zmm, zmm {er}" xed="VFMADD132PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <instruction name="VFMADD213PS" form="zmm {z}, zmm, zmm {er}" xed="VFMADD213PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <instruction name="VFMADD231PS" form="zmm {z}, zmm, zmm {er}" xed="VFMADD231PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_fmadd_round_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="__m128d" varname="c" etype="FP64"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Multiply the lower double-precision (64-bit) floating-point elements in "a" and "b", and add the intermediate result to the lower element in "c". Store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst".
+ [round_note]</description>
+ <operation>
+dst[63:0] := (a[63:0] * b[63:0]) + c[63:0]
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VFMADD132SD" form="xmm, xmm, xmm {er}" xed="VFMADD132SD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <instruction name="VFMADD213SD" form="xmm, xmm, xmm {er}" xed="VFMADD213SD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <instruction name="VFMADD231SD" form="xmm, xmm, xmm {er}" xed="VFMADD231SD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask3_fmadd_round_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="__m128d" varname="c" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Multiply the lower double-precision (64-bit) floating-point elements in "a" and "b", and add the intermediate result to the lower element in "c". Store the result in the lower element of "dst" using writemask "k" (the element is copied from "c" when mask bit 0 is not set), and copy the upper element from "c" to the upper element of "dst".
+ [round_note]</description>
+ <operation>
+IF k[0]
+ dst[63:0] := (a[63:0] * b[63:0]) + c[63:0]
+ELSE
+ dst[63:0] := c[63:0]
+FI
+dst[127:64] := c[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VFMADD132SD" form="xmm {k}, xmm, xmm {er}" xed="VFMADD132SD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <instruction name="VFMADD213SD" form="xmm {k}, xmm, xmm {er}" xed="VFMADD213SD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <instruction name="VFMADD231SD" form="xmm {k}, xmm, xmm {er}" xed="VFMADD231SD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask3_fmadd_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="__m128d" varname="c" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <description>Multiply the lower double-precision (64-bit) floating-point elements in "a" and "b", and add the intermediate result to the lower element in "c". Store the result in the lower element of "dst" using writemask "k" (the element is copied from "c" when mask bit 0 is not set), and copy the upper element from "c" to the upper element of "dst".</description>
+ <operation>
+IF k[0]
+ dst[63:0] := (a[63:0] * b[63:0]) + c[63:0]
+ELSE
+ dst[63:0] := c[63:0]
+FI
+dst[127:64] := c[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VFMADD132SD" form="xmm {k}, xmm, xmm" xed="VFMADD132SD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <instruction name="VFMADD213SD" form="xmm {k}, xmm, xmm" xed="VFMADD213SD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <instruction name="VFMADD231SD" form="xmm {k}, xmm, xmm" xed="VFMADD231SD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_fmadd_round_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="__m128d" varname="c" etype="FP64"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Multiply the lower double-precision (64-bit) floating-point elements in "a" and "b", and add the intermediate result to the lower element in "c". Store the result in the lower element of "dst" using writemask "k" (the element is copied from "a" when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst".
+ [round_note]</description>
+ <operation>
+IF k[0]
+ dst[63:0] := (a[63:0] * b[63:0]) + c[63:0]
+ELSE
+ dst[63:0] := a[63:0]
+FI
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VFMADD132SD" form="xmm {k}, xmm, xmm {er}" xed="VFMADD132SD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <instruction name="VFMADD213SD" form="xmm {k}, xmm, xmm {er}" xed="VFMADD213SD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <instruction name="VFMADD231SD" form="xmm {k}, xmm, xmm {er}" xed="VFMADD231SD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_fmadd_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="__m128d" varname="c" etype="FP64"/>
+ <description>Multiply the lower double-precision (64-bit) floating-point elements in "a" and "b", and add the intermediate result to the lower element in "c". Store the result in the lower element of "dst" using writemask "k" (the element is copied from "a" when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst".</description>
+ <operation>
+IF k[0]
+ dst[63:0] := (a[63:0] * b[63:0]) + c[63:0]
+ELSE
+ dst[63:0] := a[63:0]
+FI
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VFMADD132SD" form="xmm {k}, xmm, xmm" xed="VFMADD132SD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <instruction name="VFMADD213SD" form="xmm {k}, xmm, xmm" xed="VFMADD213SD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <instruction name="VFMADD231SD" form="xmm {k}, xmm, xmm" xed="VFMADD231SD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_fmadd_round_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="__m128d" varname="c" etype="FP64"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Multiply the lower double-precision (64-bit) floating-point elements in "a" and "b", and add the intermediate result to the lower element in "c". Store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst".
+ [round_note]</description>
+ <operation>
+IF k[0]
+ dst[63:0] := (a[63:0] * b[63:0]) + c[63:0]
+ELSE
+ dst[63:0] := 0
+FI
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VFMADD132SD" form="xmm {z}, xmm, xmm {er}" xed="VFMADD132SD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <instruction name="VFMADD213SD" form="xmm {z}, xmm, xmm {er}" xed="VFMADD213SD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <instruction name="VFMADD231SD" form="xmm {z}, xmm, xmm {er}" xed="VFMADD231SD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_fmadd_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="__m128d" varname="c" etype="FP64"/>
+ <description>Multiply the lower double-precision (64-bit) floating-point elements in "a" and "b", and add the intermediate result to the lower element in "c". Store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst".</description>
+ <operation>
+IF k[0]
+ dst[63:0] := (a[63:0] * b[63:0]) + c[63:0]
+ELSE
+ dst[63:0] := 0
+FI
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VFMADD132SD" form="xmm {z}, xmm, xmm" xed="VFMADD132SD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <instruction name="VFMADD213SD" form="xmm {z}, xmm, xmm" xed="VFMADD213SD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <instruction name="VFMADD231SD" form="xmm {z}, xmm, xmm" xed="VFMADD231SD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask3_fmadd_round_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="__m128" varname="c" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Multiply the lower single-precision (32-bit) floating-point elements in "a" and "b", and add the intermediate result to the lower element in "c". Store the result in the lower element of "dst" using writemask "k" (the element is copied from "c" when mask bit 0 is not set), and copy the upper 3 packed elements from "c" to the upper elements of "dst".
+ [round_note]</description>
+ <operation>
+IF k[0]
+ dst[31:0] := (a[31:0] * b[31:0]) + c[31:0]
+ELSE
+ dst[31:0] := c[31:0]
+FI
+dst[127:32] := c[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VFMADD132SS" form="xmm {k}, xmm, xmm {er}" xed="VFMADD132SS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <instruction name="VFMADD213SS" form="xmm {k}, xmm, xmm {er}" xed="VFMADD213SS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <instruction name="VFMADD231SS" form="xmm {k}, xmm, xmm {er}" xed="VFMADD231SS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask3_fmadd_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="__m128" varname="c" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <description>Multiply the lower single-precision (32-bit) floating-point elements in "a" and "b", and add the intermediate result to the lower element in "c". Store the result in the lower element of "dst" using writemask "k" (the element is copied from "c" when mask bit 0 is not set), and copy the upper 3 packed elements from "c" to the upper elements of "dst".</description>
+ <operation>
+IF k[0]
+ dst[31:0] := (a[31:0] * b[31:0]) + c[31:0]
+ELSE
+ dst[31:0] := c[31:0]
+FI
+dst[127:32] := c[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VFMADD132SS" form="xmm {k}, xmm, xmm" xed="VFMADD132SS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <instruction name="VFMADD213SS" form="xmm {k}, xmm, xmm" xed="VFMADD213SS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <instruction name="VFMADD231SS" form="xmm {k}, xmm, xmm" xed="VFMADD231SS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_fmadd_round_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="__m128" varname="c" etype="FP32"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Multiply the lower single-precision (32-bit) floating-point elements in "a" and "b", and add the intermediate result to the lower element in "c". Store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst".
+ [round_note]</description>
+ <operation>
+dst[31:0] := (a[31:0] * b[31:0]) + c[31:0]
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VFMADD132SS" form="xmm, xmm, xmm {er}" xed="VFMADD132SS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <instruction name="VFMADD213SS" form="xmm, xmm, xmm {er}" xed="VFMADD213SS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <instruction name="VFMADD231SS" form="xmm, xmm, xmm {er}" xed="VFMADD231SS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_fmadd_round_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="__m128" varname="c" etype="FP32"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Multiply the lower single-precision (32-bit) floating-point elements in "a" and "b", and add the intermediate result to the lower element in "c". Store the result in the lower element of "dst" using writemask "k" (the element is copied from "a" when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst".
+ [round_note]</description>
+ <operation>
+IF k[0]
+ dst[31:0] := (a[31:0] * b[31:0]) + c[31:0]
+ELSE
+ dst[31:0] := a[31:0]
+FI
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VFMADD132SS" form="xmm {k}, xmm, xmm {er}" xed="VFMADD132SS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <instruction name="VFMADD213SS" form="xmm {k}, xmm, xmm {er}" xed="VFMADD213SS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <instruction name="VFMADD231SS" form="xmm {k}, xmm, xmm {er}" xed="VFMADD231SS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_fmadd_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="__m128" varname="c" etype="FP32"/>
+ <description>Multiply the lower single-precision (32-bit) floating-point elements in "a" and "b", and add the intermediate result to the lower element in "c". Store the result in the lower element of "dst" using writemask "k" (the element is copied from "a" when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst".</description>
+ <operation>
+IF k[0]
+ dst[31:0] := (a[31:0] * b[31:0]) + c[31:0]
+ELSE
+ dst[31:0] := a[31:0]
+FI
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VFMADD132SS" form="xmm {k}, xmm, xmm" xed="VFMADD132SS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <instruction name="VFMADD213SS" form="xmm {k}, xmm, xmm" xed="VFMADD213SS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <instruction name="VFMADD231SS" form="xmm {k}, xmm, xmm" xed="VFMADD231SS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_fmadd_round_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="__m128" varname="c" etype="FP32"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Multiply the lower single-precision (32-bit) floating-point elements in "a" and "b", and add the intermediate result to the lower element in "c". Store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst".
+ [round_note]</description>
+ <operation>
+IF k[0]
+ dst[31:0] := (a[31:0] * b[31:0]) + c[31:0]
+ELSE
+ dst[31:0] := 0
+FI
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VFMADD132SS" form="xmm {z}, xmm, xmm {er}" xed="VFMADD132SS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <instruction name="VFMADD213SS" form="xmm {z}, xmm, xmm {er}" xed="VFMADD213SS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <instruction name="VFMADD231SS" form="xmm {z}, xmm, xmm {er}" xed="VFMADD231SS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_fmadd_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="__m128" varname="c" etype="FP32"/>
+ <description>Multiply the lower single-precision (32-bit) floating-point elements in "a" and "b", and add the intermediate result to the lower element in "c". Store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst".</description>
+ <operation>
+IF k[0]
+ dst[31:0] := (a[31:0] * b[31:0]) + c[31:0]
+ELSE
+ dst[31:0] := 0
+FI
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VFMADD132SS" form="xmm {z}, xmm, xmm" xed="VFMADD132SS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <instruction name="VFMADD213SS" form="xmm {z}, xmm, xmm" xed="VFMADD213SS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <instruction name="VFMADD231SS" form="xmm {z}, xmm, xmm" xed="VFMADD231SS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_fmaddsub_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <parameter type="__m512d" varname="c" etype="FP64"/>
+ <description>Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", alternatively add and subtract packed elements in "c" to/from the intermediate result, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF ((j &amp; 1) == 0)
+ dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i]
+ ELSE
+ dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VFMADDSUB132PD" form="zmm, zmm, zmm" xed="VFMADDSUB132PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <instruction name="VFMADDSUB213PD" form="zmm, zmm, zmm" xed="VFMADDSUB213PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <instruction name="VFMADDSUB231PD" form="zmm, zmm, zmm" xed="VFMADDSUB231PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_fmaddsub_round_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <parameter type="__m512d" varname="c" etype="FP64"/>
+ <parameter type="const int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", alternatively add and subtract packed elements in "c" to/from the intermediate result, and store the results in "dst".
+ [round_note]</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF ((j &amp; 1) == 0)
+ dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i]
+ ELSE
+ dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VFMADDSUB132PD" form="zmm, zmm, zmm {er}" xed="VFMADDSUB132PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <instruction name="VFMADDSUB213PD" form="zmm, zmm, zmm {er}" xed="VFMADDSUB213PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <instruction name="VFMADDSUB231PD" form="zmm, zmm, zmm {er}" xed="VFMADDSUB231PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask3_fmaddsub_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <parameter type="__m512d" varname="c" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <description>Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", alternatively add and subtract packed elements in "c" to/from the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ IF ((j &amp; 1) == 0)
+ dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i]
+ ELSE
+ dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i]
+ FI
+ ELSE
+ dst[i+63:i] := c[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VFMADDSUB132PD" form="zmm {k}, zmm, zmm" xed="VFMADDSUB132PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <instruction name="VFMADDSUB213PD" form="zmm {k}, zmm, zmm" xed="VFMADDSUB213PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <instruction name="VFMADDSUB231PD" form="zmm {k}, zmm, zmm" xed="VFMADDSUB231PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask3_fmaddsub_round_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <parameter type="__m512d" varname="c" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="const int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", alternatively add and subtract packed elements in "c" to/from the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set). [round_note]</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ IF ((j &amp; 1) == 0)
+ dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i]
+ ELSE
+ dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i]
+ FI
+ ELSE
+ dst[i+63:i] := c[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VFMADDSUB132PD" form="zmm {k}, zmm, zmm {er}" xed="VFMADDSUB132PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <instruction name="VFMADDSUB213PD" form="zmm {k}, zmm, zmm {er}" xed="VFMADDSUB213PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <instruction name="VFMADDSUB231PD" form="zmm {k}, zmm, zmm {er}" xed="VFMADDSUB231PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_fmaddsub_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <parameter type="__m512d" varname="c" etype="FP64"/>
+ <description>Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", alternatively add and subtract packed elements in "c" to/from the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ IF ((j &amp; 1) == 0)
+ dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i]
+ ELSE
+ dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i]
+ FI
+ ELSE
+ dst[i+63:i] := a[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VFMADDSUB132PD" form="zmm {k}, zmm, zmm" xed="VFMADDSUB132PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <instruction name="VFMADDSUB213PD" form="zmm {k}, zmm, zmm" xed="VFMADDSUB213PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <instruction name="VFMADDSUB231PD" form="zmm {k}, zmm, zmm" xed="VFMADDSUB231PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_fmaddsub_round_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <parameter type="__m512d" varname="c" etype="FP64"/>
+ <parameter type="const int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", alternatively add and subtract packed elements in "c" to/from the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). [round_note]</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ IF ((j &amp; 1) == 0)
+ dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i]
+ ELSE
+ dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i]
+ FI
+ ELSE
+ dst[i+63:i] := a[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VFMADDSUB132PD" form="zmm {k}, zmm, zmm {er}" xed="VFMADDSUB132PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <instruction name="VFMADDSUB213PD" form="zmm {k}, zmm, zmm {er}" xed="VFMADDSUB213PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <instruction name="VFMADDSUB231PD" form="zmm {k}, zmm, zmm {er}" xed="VFMADDSUB231PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_fmaddsub_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <parameter type="__m512d" varname="c" etype="FP64"/>
+ <description>Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", alternatively add and subtract packed elements in "c" to/from the intermediate result, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ IF ((j &amp; 1) == 0)
+ dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i]
+ ELSE
+ dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i]
+ FI
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VFMADDSUB132PD" form="zmm {z}, zmm, zmm" xed="VFMADDSUB132PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <instruction name="VFMADDSUB213PD" form="zmm {z}, zmm, zmm" xed="VFMADDSUB213PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <instruction name="VFMADDSUB231PD" form="zmm {z}, zmm, zmm" xed="VFMADDSUB231PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_fmaddsub_round_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <parameter type="__m512d" varname="c" etype="FP64"/>
+ <parameter type="const int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", alternatively add and subtract packed elements in "c" to/from the intermediate result, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+ [round_note]</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ IF ((j &amp; 1) == 0)
+ dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i]
+ ELSE
+ dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i]
+ FI
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VFMADDSUB132PD" form="zmm {z}, zmm, zmm {er}" xed="VFMADDSUB132PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <instruction name="VFMADDSUB213PD" form="zmm {z}, zmm, zmm {er}" xed="VFMADDSUB213PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <instruction name="VFMADDSUB231PD" form="zmm {z}, zmm, zmm {er}" xed="VFMADDSUB231PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_fmaddsub_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <parameter type="__m512" varname="c" etype="FP32"/>
+ <description>Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", alternatively add and subtract packed elements in "c" to/from the intermediate result, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF ((j &amp; 1) == 0)
+ dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i]
+ ELSE
+ dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VFMADDSUB132PS" form="zmm, zmm, zmm" xed="VFMADDSUB132PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <instruction name="VFMADDSUB213PS" form="zmm, zmm, zmm" xed="VFMADDSUB213PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <instruction name="VFMADDSUB231PS" form="zmm, zmm, zmm" xed="VFMADDSUB231PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_fmaddsub_round_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <parameter type="__m512" varname="c" etype="FP32"/>
+ <parameter type="const int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", alternatively add and subtract packed elements in "c" to/from the intermediate result, and store the results in "dst".
+ [round_note]</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF ((j &amp; 1) == 0)
+ dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i]
+ ELSE
+ dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VFMADDSUB132PS" form="zmm, zmm, zmm {er}" xed="VFMADDSUB132PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <instruction name="VFMADDSUB213PS" form="zmm, zmm, zmm {er}" xed="VFMADDSUB213PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <instruction name="VFMADDSUB231PS" form="zmm, zmm, zmm {er}" xed="VFMADDSUB231PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask3_fmaddsub_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <parameter type="__m512" varname="c" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <description>Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", alternatively add and subtract packed elements in "c" to/from the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ IF ((j &amp; 1) == 0)
+ dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i]
+ ELSE
+ dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i]
+ FI
+ ELSE
+ dst[i+31:i] := c[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VFMADDSUB132PS" form="zmm {k}, zmm, zmm" xed="VFMADDSUB132PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <instruction name="VFMADDSUB213PS" form="zmm {k}, zmm, zmm" xed="VFMADDSUB213PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <instruction name="VFMADDSUB231PS" form="zmm {k}, zmm, zmm" xed="VFMADDSUB231PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask3_fmaddsub_round_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <parameter type="__m512" varname="c" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="const int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", alternatively add and subtract packed elements in "c" to/from the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set). [round_note]</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ IF ((j &amp; 1) == 0)
+ dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i]
+ ELSE
+ dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i]
+ FI
+ ELSE
+ dst[i+31:i] := c[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VFMADDSUB132PS" form="zmm {k}, zmm, zmm {er}" xed="VFMADDSUB132PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <instruction name="VFMADDSUB213PS" form="zmm {k}, zmm, zmm {er}" xed="VFMADDSUB213PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <instruction name="VFMADDSUB231PS" form="zmm {k}, zmm, zmm {er}" xed="VFMADDSUB231PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_fmaddsub_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <parameter type="__m512" varname="c" etype="FP32"/>
+ <description>Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", alternatively add and subtract packed elements in "c" to/from the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ IF ((j &amp; 1) == 0)
+ dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i]
+ ELSE
+ dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i]
+ FI
+ ELSE
+ dst[i+31:i] := a[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VFMADDSUB132PS" form="zmm {k}, zmm, zmm" xed="VFMADDSUB132PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <instruction name="VFMADDSUB213PS" form="zmm {k}, zmm, zmm" xed="VFMADDSUB213PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <instruction name="VFMADDSUB231PS" form="zmm {k}, zmm, zmm" xed="VFMADDSUB231PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_fmaddsub_round_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <parameter type="__m512" varname="c" etype="FP32"/>
+ <parameter type="const int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", alternatively add and subtract packed elements in "c" to/from the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). [round_note]</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ IF ((j &amp; 1) == 0)
+ dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i]
+ ELSE
+ dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i]
+ FI
+ ELSE
+ dst[i+31:i] := a[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VFMADDSUB132PS" form="zmm {k}, zmm, zmm {er}" xed="VFMADDSUB132PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <instruction name="VFMADDSUB213PS" form="zmm {k}, zmm, zmm {er}" xed="VFMADDSUB213PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <instruction name="VFMADDSUB231PS" form="zmm {k}, zmm, zmm {er}" xed="VFMADDSUB231PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_fmaddsub_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <parameter type="__m512" varname="c" etype="FP32"/>
+ <description>Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", alternatively add and subtract packed elements in "c" to/from the intermediate result, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ IF ((j &amp; 1) == 0)
+ dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i]
+ ELSE
+ dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i]
+ FI
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VFMADDSUB132PS" form="zmm {z}, zmm, zmm" xed="VFMADDSUB132PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <instruction name="VFMADDSUB213PS" form="zmm {z}, zmm, zmm" xed="VFMADDSUB213PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <instruction name="VFMADDSUB231PS" form="zmm {z}, zmm, zmm" xed="VFMADDSUB231PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_fmaddsub_round_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <parameter type="__m512" varname="c" etype="FP32"/>
+ <parameter type="const int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", alternatively add and subtract packed elements in "c" to/from the intermediate result, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+ [round_note]</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ IF ((j &amp; 1) == 0)
+ dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i]
+ ELSE
+ dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i]
+ FI
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VFMADDSUB132PS" form="zmm {z}, zmm, zmm {er}" xed="VFMADDSUB132PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <instruction name="VFMADDSUB213PS" form="zmm {z}, zmm, zmm {er}" xed="VFMADDSUB213PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <instruction name="VFMADDSUB231PS" form="zmm {z}, zmm, zmm {er}" xed="VFMADDSUB231PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_fmsub_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <parameter type="__m512d" varname="c" etype="FP64"/>
+ <description>Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the intermediate result, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VFMSUB132PD" form="zmm {z}, zmm, zmm" xed="VFMSUB132PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <instruction name="VFMSUB213PD" form="zmm {z}, zmm, zmm" xed="VFMSUB213PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <instruction name="VFMSUB231PD" form="zmm {z}, zmm, zmm" xed="VFMSUB231PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_fmsub_round_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <parameter type="__m512d" varname="c" etype="FP64"/>
+ <parameter type="const int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the intermediate result, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+ [round_note]</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VFMSUB132PD" form="zmm {z}, zmm, zmm {er}" xed="VFMSUB132PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <instruction name="VFMSUB213PD" form="zmm {z}, zmm, zmm {er}" xed="VFMSUB213PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <instruction name="VFMSUB231PD" form="zmm {z}, zmm, zmm {er}" xed="VFMSUB231PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_fmsub_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <parameter type="__m512" varname="c" etype="FP32"/>
+ <description>Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the intermediate result, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VFMSUB132PS" form="zmm {z}, zmm, zmm" xed="VFMSUB132PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <instruction name="VFMSUB213PS" form="zmm {z}, zmm, zmm" xed="VFMSUB213PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <instruction name="VFMSUB231PS" form="zmm {z}, zmm, zmm" xed="VFMSUB231PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_fmsub_round_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <parameter type="__m512" varname="c" etype="FP32"/>
+ <parameter type="const int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the intermediate result, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+ [round_note]</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VFMSUB132PS" form="zmm {z}, zmm, zmm {er}" xed="VFMSUB132PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <instruction name="VFMSUB213PS" form="zmm {z}, zmm, zmm {er}" xed="VFMSUB213PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <instruction name="VFMSUB231PS" form="zmm {z}, zmm, zmm {er}" xed="VFMSUB231PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_fmsub_round_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="__m128d" varname="c" etype="FP64"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Multiply the lower double-precision (64-bit) floating-point elements in "a" and "b", and subtract the lower element in "c" from the intermediate result. Store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst".
+ [round_note]</description>
+ <operation>
+dst[63:0] := (a[63:0] * b[63:0]) - c[63:0]
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VFMSUB132SD" form="xmm, xmm, xmm {er}" xed="VFMSUB132SD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <instruction name="VFMSUB213SD" form="xmm, xmm, xmm {er}" xed="VFMSUB213SD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <instruction name="VFMSUB231SD" form="xmm, xmm, xmm {er}" xed="VFMSUB231SD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask3_fmsub_round_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="__m128d" varname="c" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Multiply the lower double-precision (64-bit) floating-point elements in "a" and "b", and subtract the lower element in "c" from the intermediate result. Store the result in the lower element of "dst" using writemask "k" (the element is copied from "c" when mask bit 0 is not set), and copy the upper element from "c" to the upper element of "dst".
+ [round_note]</description>
+ <operation>
+IF k[0]
+ dst[63:0] := (a[63:0] * b[63:0]) - c[63:0]
+ELSE
+ dst[63:0] := c[63:0]
+FI
+dst[127:64] := c[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VFMSUB132SD" form="xmm {k}, xmm, xmm {er}" xed="VFMSUB132SD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <instruction name="VFMSUB213SD" form="xmm {k}, xmm, xmm {er}" xed="VFMSUB213SD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <instruction name="VFMSUB231SD" form="xmm {k}, xmm, xmm {er}" xed="VFMSUB231SD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask3_fmsub_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="__m128d" varname="c" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <description>Multiply the lower double-precision (64-bit) floating-point elements in "a" and "b", and subtract the lower element in "c" from the intermediate result. Store the result in the lower element of "dst" using writemask "k" (the element is copied from "c" when mask bit 0 is not set), and copy the upper element from "c" to the upper element of "dst".</description>
+ <operation>
+IF k[0]
+ dst[63:0] := (a[63:0] * b[63:0]) - c[63:0]
+ELSE
+ dst[63:0] := c[63:0]
+FI
+dst[127:64] := c[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VFMSUB132SD" form="xmm {k}, xmm, xmm" xed="VFMSUB132SD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <instruction name="VFMSUB213SD" form="xmm {k}, xmm, xmm" xed="VFMSUB213SD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <instruction name="VFMSUB231SD" form="xmm {k}, xmm, xmm" xed="VFMSUB231SD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_fmsub_round_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="__m128d" varname="c" etype="FP64"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Multiply the lower double-precision (64-bit) floating-point elements in "a" and "b", and subtract the lower element in "c" from the intermediate result. Store the result in the lower element of "dst" using writemask "k" (the element is copied from "a" when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst".
+ [round_note]</description>
+ <operation>
+IF k[0]
+ dst[63:0] := (a[63:0] * b[63:0]) - c[63:0]
+ELSE
+ dst[63:0] := a[63:0]
+FI
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VFMSUB132SD" form="xmm {k}, xmm, xmm {er}" xed="VFMSUB132SD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <instruction name="VFMSUB213SD" form="xmm {k}, xmm, xmm {er}" xed="VFMSUB213SD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <instruction name="VFMSUB231SD" form="xmm {k}, xmm, xmm {er}" xed="VFMSUB231SD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_fmsub_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="__m128d" varname="c" etype="FP64"/>
+ <description>Multiply the lower double-precision (64-bit) floating-point elements in "a" and "b", and subtract the lower element in "c" from the intermediate result. Store the result in the lower element of "dst" using writemask "k" (the element is copied from "a" when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst".</description>
+ <operation>
+IF k[0]
+ dst[63:0] := (a[63:0] * b[63:0]) - c[63:0]
+ELSE
+ dst[63:0] := a[63:0]
+FI
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VFMSUB132SD" form="xmm {k}, xmm, xmm" xed="VFMSUB132SD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <instruction name="VFMSUB213SD" form="xmm {k}, xmm, xmm" xed="VFMSUB213SD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <instruction name="VFMSUB231SD" form="xmm {k}, xmm, xmm" xed="VFMSUB231SD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_fmsub_round_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="__m128d" varname="c" etype="FP64"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Multiply the lower double-precision (64-bit) floating-point elements in "a" and "b", and subtract the lower element in "c" from the intermediate result. Store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst".
+ [round_note]</description>
+ <operation>
+IF k[0]
+ dst[63:0] := (a[63:0] * b[63:0]) - c[63:0]
+ELSE
+ dst[63:0] := 0
+FI
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VFMSUB132SD" form="xmm {z}, xmm, xmm {er}" xed="VFMSUB132SD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <instruction name="VFMSUB213SD" form="xmm {z}, xmm, xmm {er}" xed="VFMSUB213SD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <instruction name="VFMSUB231SD" form="xmm {z}, xmm, xmm {er}" xed="VFMSUB231SD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_fmsub_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="__m128d" varname="c" etype="FP64"/>
+ <description>Multiply the lower double-precision (64-bit) floating-point elements in "a" and "b", and subtract the lower element in "c" from the intermediate result. Store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst".</description>
+ <operation>
+IF k[0]
+ dst[63:0] := (a[63:0] * b[63:0]) - c[63:0]
+ELSE
+ dst[63:0] := 0
+FI
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VFMSUB132SD" form="xmm {z}, xmm, xmm" xed="VFMSUB132SD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <instruction name="VFMSUB213SD" form="xmm {z}, xmm, xmm" xed="VFMSUB213SD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <instruction name="VFMSUB231SD" form="xmm {z}, xmm, xmm" xed="VFMSUB231SD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_fmsub_round_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="__m128" varname="c" etype="FP32"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Multiply the lower single-precision (32-bit) floating-point elements in "a" and "b", and subtract the lower element in "c" from the intermediate result. Store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst".
+ [round_note]</description>
+ <operation>
+dst[31:0] := (a[31:0] * b[31:0]) - c[31:0]
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VFMSUB132SS" form="xmm, xmm, xmm {er}" xed="VFMSUB132SS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <instruction name="VFMSUB213SS" form="xmm, xmm, xmm {er}" xed="VFMSUB213SS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <instruction name="VFMSUB231SS" form="xmm, xmm, xmm {er}" xed="VFMSUB231SS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask3_fmsub_round_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="__m128" varname="c" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Multiply the lower single-precision (32-bit) floating-point elements in "a" and "b", and subtract the lower element in "c" from the intermediate result. Store the result in the lower element of "dst" using writemask "k" (the element is copied from "c" when mask bit 0 is not set), and copy the upper 3 packed elements from "c" to the upper elements of "dst".
+ [round_note]</description>
+ <operation>
+IF k[0]
+ dst[31:0] := (a[31:0] * b[31:0]) - c[31:0]
+ELSE
+ dst[31:0] := c[31:0]
+FI
+dst[127:32] := c[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VFMSUB132SS" form="xmm {k}, xmm, xmm {er}" xed="VFMSUB132SS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <instruction name="VFMSUB213SS" form="xmm {k}, xmm, xmm {er}" xed="VFMSUB213SS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <instruction name="VFMSUB231SS" form="xmm {k}, xmm, xmm {er}" xed="VFMSUB231SS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask3_fmsub_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="__m128" varname="c" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <description>Multiply the lower single-precision (32-bit) floating-point elements in "a" and "b", and subtract the lower element in "c" from the intermediate result. Store the result in the lower element of "dst" using writemask "k" (the element is copied from "c" when mask bit 0 is not set), and copy the upper 3 packed elements from "c" to the upper elements of "dst".</description>
+ <operation>
+IF k[0]
+ dst[31:0] := (a[31:0] * b[31:0]) - c[31:0]
+ELSE
+ dst[31:0] := c[31:0]
+FI
+dst[127:32] := c[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VFMSUB132SS" form="xmm {k}, xmm, xmm" xed="VFMSUB132SS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <instruction name="VFMSUB213SS" form="xmm {k}, xmm, xmm" xed="VFMSUB213SS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <instruction name="VFMSUB231SS" form="xmm {k}, xmm, xmm" xed="VFMSUB231SS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_fmsub_round_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="__m128" varname="c" etype="FP32"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Multiply the lower single-precision (32-bit) floating-point elements in "a" and "b", and subtract the lower element in "c" from the intermediate result. Store the result in the lower element of "dst" using writemask "k" (the element is copied from "a" when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst".
+ [round_note]</description>
+ <operation>
+IF k[0]
+ dst[31:0] := (a[31:0] * b[31:0]) - c[31:0]
+ELSE
+ dst[31:0] := a[31:0]
+FI
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VFMSUB132SS" form="xmm {k}, xmm, xmm {er}" xed="VFMSUB132SS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <instruction name="VFMSUB213SS" form="xmm {k}, xmm, xmm {er}" xed="VFMSUB213SS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <instruction name="VFMSUB231SS" form="xmm {k}, xmm, xmm {er}" xed="VFMSUB231SS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_fmsub_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="__m128" varname="c" etype="FP32"/>
+ <description>Multiply the lower single-precision (32-bit) floating-point elements in "a" and "b", and subtract the lower element in "c" from the intermediate result. Store the result in the lower element of "dst" using writemask "k" (the element is copied from "a" when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst".</description>
+ <operation>
+IF k[0]
+ dst[31:0] := (a[31:0] * b[31:0]) - c[31:0]
+ELSE
+ dst[31:0] := a[31:0]
+FI
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VFMSUB132SS" form="xmm {k}, xmm, xmm" xed="VFMSUB132SS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <instruction name="VFMSUB213SS" form="xmm {k}, xmm, xmm" xed="VFMSUB213SS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <instruction name="VFMSUB231SS" form="xmm {k}, xmm, xmm" xed="VFMSUB231SS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_fmsub_round_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="__m128" varname="c" etype="FP32"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Multiply the lower single-precision (32-bit) floating-point elements in "a" and "b", and subtract the lower element in "c" from the intermediate result. Store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst".
+ [round_note]</description>
+ <operation>
+IF k[0]
+ dst[31:0] := (a[31:0] * b[31:0]) - c[31:0]
+ELSE
+ dst[31:0] := 0
+FI
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VFMSUB132SS" form="xmm {z}, xmm, xmm {er}" xed="VFMSUB132SS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <instruction name="VFMSUB213SS" form="xmm {z}, xmm, xmm {er}" xed="VFMSUB213SS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <instruction name="VFMSUB231SS" form="xmm {z}, xmm, xmm {er}" xed="VFMSUB231SS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_fmsub_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="__m128" varname="c" etype="FP32"/>
+ <description>Multiply the lower single-precision (32-bit) floating-point elements in "a" and "b", and subtract the lower element in "c" from the intermediate result. Store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst".</description>
+ <operation>
+IF k[0]
+ dst[31:0] := (a[31:0] * b[31:0]) - c[31:0]
+ELSE
+ dst[31:0] := 0
+FI
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VFMSUB132SS" form="xmm {z}, xmm, xmm" xed="VFMSUB132SS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <instruction name="VFMSUB213SS" form="xmm {z}, xmm, xmm" xed="VFMSUB213SS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <instruction name="VFMSUB231SS" form="xmm {z}, xmm, xmm" xed="VFMSUB231SS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_fmsubadd_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <parameter type="__m512d" varname="c" etype="FP64"/>
+ <description>Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", alternatively subtract and add packed elements in "c" from/to the intermediate result, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF ((j &amp; 1) == 0)
+ dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i]
+ ELSE
+ dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VFMSUBADD132PD" form="zmm, zmm, zmm" xed="VFMSUBADD132PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <instruction name="VFMSUBADD213PD" form="zmm, zmm, zmm" xed="VFMSUBADD213PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <instruction name="VFMSUBADD231PD" form="zmm, zmm, zmm" xed="VFMSUBADD231PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_fmsubadd_round_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <parameter type="__m512d" varname="c" etype="FP64"/>
+ <parameter type="const int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", alternatively subtract and add packed elements in "c" from/to the intermediate result, and store the results in "dst".
+ [round_note]</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF ((j &amp; 1) == 0)
+ dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i]
+ ELSE
+ dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VFMSUBADD132PD" form="zmm, zmm, zmm {er}" xed="VFMSUBADD132PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <instruction name="VFMSUBADD213PD" form="zmm, zmm, zmm {er}" xed="VFMSUBADD213PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <instruction name="VFMSUBADD231PD" form="zmm, zmm, zmm {er}" xed="VFMSUBADD231PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask3_fmsubadd_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <parameter type="__m512d" varname="c" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <description>Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", alternatively subtract and add packed elements in "c" from/to the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ IF ((j &amp; 1) == 0)
+ dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i]
+ ELSE
+ dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i]
+ FI
+ ELSE
+ dst[i+63:i] := c[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VFMSUBADD132PD" form="zmm {k}, zmm, zmm" xed="VFMSUBADD132PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <instruction name="VFMSUBADD213PD" form="zmm {k}, zmm, zmm" xed="VFMSUBADD213PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <instruction name="VFMSUBADD231PD" form="zmm {k}, zmm, zmm" xed="VFMSUBADD231PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask3_fmsubadd_round_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <parameter type="__m512d" varname="c" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="const int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", alternatively subtract and add packed elements in "c" from/to the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set). [round_note]</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ IF ((j &amp; 1) == 0)
+ dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i]
+ ELSE
+ dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i]
+ FI
+ ELSE
+ dst[i+63:i] := c[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VFMSUBADD132PD" form="zmm {k}, zmm, zmm {er}" xed="VFMSUBADD132PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <instruction name="VFMSUBADD213PD" form="zmm {k}, zmm, zmm {er}" xed="VFMSUBADD213PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <instruction name="VFMSUBADD231PD" form="zmm {k}, zmm, zmm {er}" xed="VFMSUBADD231PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_fmsubadd_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <parameter type="__m512d" varname="c" etype="FP64"/>
+ <description>Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", alternatively subtract and add packed elements in "c" from/to the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ IF ((j &amp; 1) == 0)
+ dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i]
+ ELSE
+ dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i]
+ FI
+ ELSE
+ dst[i+63:i] := a[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VFMSUBADD132PD" form="zmm {k}, zmm, zmm" xed="VFMSUBADD132PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <instruction name="VFMSUBADD213PD" form="zmm {k}, zmm, zmm" xed="VFMSUBADD213PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <instruction name="VFMSUBADD231PD" form="zmm {k}, zmm, zmm" xed="VFMSUBADD231PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_fmsubadd_round_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <parameter type="__m512d" varname="c" etype="FP64"/>
+ <parameter type="const int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", alternatively subtract and add packed elements in "c" from/to the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). [round_note]</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ IF ((j &amp; 1) == 0)
+ dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i]
+ ELSE
+ dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i]
+ FI
+ ELSE
+ dst[i+63:i] := a[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VFMSUBADD132PD" form="zmm {k}, zmm, zmm {er}" xed="VFMSUBADD132PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <instruction name="VFMSUBADD213PD" form="zmm {k}, zmm, zmm {er}" xed="VFMSUBADD213PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <instruction name="VFMSUBADD231PD" form="zmm {k}, zmm, zmm {er}" xed="VFMSUBADD231PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_fmsubadd_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <parameter type="__m512d" varname="c" etype="FP64"/>
+ <description>Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", alternatively subtract and add packed elements in "c" from/to the intermediate result, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ IF ((j &amp; 1) == 0)
+ dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i]
+ ELSE
+ dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i]
+ FI
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VFMSUBADD132PD" form="zmm {z}, zmm, zmm" xed="VFMSUBADD132PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <instruction name="VFMSUBADD213PD" form="zmm {z}, zmm, zmm" xed="VFMSUBADD213PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <instruction name="VFMSUBADD231PD" form="zmm {z}, zmm, zmm" xed="VFMSUBADD231PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_fmsubadd_round_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <parameter type="__m512d" varname="c" etype="FP64"/>
+ <parameter type="const int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", alternatively subtract and add packed elements in "c" from/to the intermediate result, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+ [round_note]</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ IF ((j &amp; 1) == 0)
+ dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i]
+ ELSE
+ dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i]
+ FI
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VFMSUBADD132PD" form="zmm {z}, zmm, zmm {er}" xed="VFMSUBADD132PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <instruction name="VFMSUBADD213PD" form="zmm {z}, zmm, zmm {er}" xed="VFMSUBADD213PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <instruction name="VFMSUBADD231PD" form="zmm {z}, zmm, zmm {er}" xed="VFMSUBADD231PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_fmsubadd_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <parameter type="__m512" varname="c" etype="FP32"/>
+ <description>Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", alternatively subtract and add packed elements in "c" from/to the intermediate result, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF ((j &amp; 1) == 0)
+ dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i]
+ ELSE
+ dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VFMSUBADD132PS" form="zmm, zmm, zmm" xed="VFMSUBADD132PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <instruction name="VFMSUBADD213PS" form="zmm, zmm, zmm" xed="VFMSUBADD213PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <instruction name="VFMSUBADD231PS" form="zmm, zmm, zmm" xed="VFMSUBADD231PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_fmsubadd_round_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <parameter type="__m512" varname="c" etype="FP32"/>
+ <parameter type="const int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", alternatively subtract and add packed elements in "c" from/to the intermediate result, and store the results in "dst".
+ [round_note]</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF ((j &amp; 1) == 0)
+ dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i]
+ ELSE
+ dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VFMSUBADD132PS" form="zmm, zmm, zmm {er}" xed="VFMSUBADD132PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <instruction name="VFMSUBADD213PS" form="zmm, zmm, zmm {er}" xed="VFMSUBADD213PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <instruction name="VFMSUBADD231PS" form="zmm, zmm, zmm {er}" xed="VFMSUBADD231PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask3_fmsubadd_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <parameter type="__m512" varname="c" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <description>Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", alternatively subtract and add packed elements in "c" from/to the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ IF ((j &amp; 1) == 0)
+ dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i]
+ ELSE
+ dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i]
+ FI
+ ELSE
+ dst[i+31:i] := c[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VFMSUBADD132PS" form="zmm {k}, zmm, zmm" xed="VFMSUBADD132PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <instruction name="VFMSUBADD213PS" form="zmm {k}, zmm, zmm" xed="VFMSUBADD213PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <instruction name="VFMSUBADD231PS" form="zmm {k}, zmm, zmm" xed="VFMSUBADD231PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask3_fmsubadd_round_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <parameter type="__m512" varname="c" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="const int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", alternatively subtract and add packed elements in "c" from/to the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set). [round_note]</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ IF ((j &amp; 1) == 0)
+ dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i]
+ ELSE
+ dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i]
+ FI
+ ELSE
+ dst[i+31:i] := c[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VFMSUBADD132PS" form="zmm {k}, zmm, zmm {er}" xed="VFMSUBADD132PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <instruction name="VFMSUBADD213PS" form="zmm {k}, zmm, zmm {er}" xed="VFMSUBADD213PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <instruction name="VFMSUBADD231PS" form="zmm {k}, zmm, zmm {er}" xed="VFMSUBADD231PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_fmsubadd_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <parameter type="__m512" varname="c" etype="FP32"/>
+ <description>Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", alternatively subtract and add packed elements in "c" from/to the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ IF ((j &amp; 1) == 0)
+ dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i]
+ ELSE
+ dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i]
+ FI
+ ELSE
+ dst[i+31:i] := a[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VFMSUBADD132PS" form="zmm {k}, zmm, zmm" xed="VFMSUBADD132PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <instruction name="VFMSUBADD213PS" form="zmm {k}, zmm, zmm" xed="VFMSUBADD213PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <instruction name="VFMSUBADD231PS" form="zmm {k}, zmm, zmm" xed="VFMSUBADD231PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_fmsubadd_round_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <parameter type="__m512" varname="c" etype="FP32"/>
+ <parameter type="const int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", alternatively subtract and add packed elements in "c" from/to the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).
+ [round_note]</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ IF ((j &amp; 1) == 0)
+ dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i]
+ ELSE
+ dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i]
+ FI
+ ELSE
+ dst[i+31:i] := a[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VFMSUBADD132PS" form="zmm {k}, zmm, zmm {er}" xed="VFMSUBADD132PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <instruction name="VFMSUBADD213PS" form="zmm {k}, zmm, zmm {er}" xed="VFMSUBADD213PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <instruction name="VFMSUBADD231PS" form="zmm {k}, zmm, zmm {er}" xed="VFMSUBADD231PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_fmsubadd_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <parameter type="__m512" varname="c" etype="FP32"/>
+ <description>Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", alternatively subtract and add packed elements in "c" from/to the intermediate result, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ IF ((j &amp; 1) == 0)
+ dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i]
+ ELSE
+ dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i]
+ FI
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VFMSUBADD132PS" form="zmm {z}, zmm, zmm" xed="VFMSUBADD132PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <instruction name="VFMSUBADD213PS" form="zmm {z}, zmm, zmm" xed="VFMSUBADD213PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <instruction name="VFMSUBADD231PS" form="zmm {z}, zmm, zmm" xed="VFMSUBADD231PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_fmsubadd_round_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <parameter type="__m512" varname="c" etype="FP32"/>
+ <parameter type="const int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", alternatively subtract and add packed elements in "c" from/to the intermediate result, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+ [round_note]</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ IF ((j &amp; 1) == 0)
+ dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i]
+ ELSE
+ dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i]
+ FI
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VFMSUBADD132PS" form="zmm {z}, zmm, zmm {er}" xed="VFMSUBADD132PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <instruction name="VFMSUBADD213PS" form="zmm {z}, zmm, zmm {er}" xed="VFMSUBADD213PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <instruction name="VFMSUBADD231PS" form="zmm {z}, zmm, zmm {er}" xed="VFMSUBADD231PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_fnmadd_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <parameter type="__m512d" varname="c" etype="FP64"/>
+ <description>Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", add the negated intermediate result to packed elements in "c", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := -(a[i+63:i] * b[i+63:i]) + c[i+63:i]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VFNMADD132PD" form="zmm {z}, zmm, zmm" xed="VFNMADD132PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <instruction name="VFNMADD213PD" form="zmm {z}, zmm, zmm" xed="VFNMADD213PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <instruction name="VFNMADD231PD" form="zmm {z}, zmm, zmm" xed="VFNMADD231PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_fnmadd_round_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <parameter type="__m512d" varname="c" etype="FP64"/>
+ <parameter type="const int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", add the negated intermediate result to packed elements in "c", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). [round_note]</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := -(a[i+63:i] * b[i+63:i]) + c[i+63:i]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VFNMADD132PD" form="zmm {z}, zmm, zmm {er}" xed="VFNMADD132PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <instruction name="VFNMADD213PD" form="zmm {z}, zmm, zmm {er}" xed="VFNMADD213PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <instruction name="VFNMADD231PD" form="zmm {z}, zmm, zmm {er}" xed="VFNMADD231PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_fnmadd_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <parameter type="__m512" varname="c" etype="FP32"/>
+ <description>Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", add the negated intermediate result to packed elements in "c", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := -(a[i+31:i] * b[i+31:i]) + c[i+31:i]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VFNMADD132PS" form="zmm {z}, zmm, zmm" xed="VFNMADD132PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <instruction name="VFNMADD213PS" form="zmm {z}, zmm, zmm" xed="VFNMADD213PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <instruction name="VFNMADD231PS" form="zmm {z}, zmm, zmm" xed="VFNMADD231PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_fnmadd_round_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <parameter type="__m512" varname="c" etype="FP32"/>
+ <parameter type="const int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", add the negated intermediate result to packed elements in "c", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). [round_note]</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := -(a[i+31:i] * b[i+31:i]) + c[i+31:i]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VFNMADD132PS" form="zmm {z}, zmm, zmm {er}" xed="VFNMADD132PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <instruction name="VFNMADD213PS" form="zmm {z}, zmm, zmm {er}" xed="VFNMADD213PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <instruction name="VFNMADD231PS" form="zmm {z}, zmm, zmm {er}" xed="VFNMADD231PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_fnmadd_round_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="__m128d" varname="c" etype="FP64"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Multiply the lower double-precision (64-bit) floating-point elements in "a" and "b", and add the negated intermediate result to the lower element in "c". Store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst".
+ [round_note]</description>
+ <operation>
+dst[63:0] := -(a[63:0] * b[63:0]) + c[63:0]
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VFNMADD132SD" form="xmm, xmm, xmm {er}" xed="VFNMADD132SD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <instruction name="VFNMADD213SD" form="xmm, xmm, xmm {er}" xed="VFNMADD213SD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <instruction name="VFNMADD231SD" form="xmm, xmm, xmm {er}" xed="VFNMADD231SD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask3_fnmadd_round_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="__m128d" varname="c" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Multiply the lower double-precision (64-bit) floating-point elements in "a" and "b", and add the negated intermediate result to the lower element in "c". Store the result in the lower element of "dst" using writemask "k" (the element is copied from "c" when mask bit 0 is not set), and copy the upper element from "c" to the upper element of "dst".
+ [round_note]</description>
+ <operation>
+IF k[0]
+ dst[63:0] := -(a[63:0] * b[63:0]) + c[63:0]
+ELSE
+ dst[63:0] := c[63:0]
+FI
+dst[127:64] := c[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VFNMADD132SD" form="xmm {k}, xmm, xmm {er}" xed="VFNMADD132SD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <instruction name="VFNMADD213SD" form="xmm {k}, xmm, xmm {er}" xed="VFNMADD213SD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <instruction name="VFNMADD231SD" form="xmm {k}, xmm, xmm {er}" xed="VFNMADD231SD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask3_fnmadd_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="__m128d" varname="c" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <description>Multiply the lower double-precision (64-bit) floating-point elements in "a" and "b", and add the negated intermediate result to the lower element in "c". Store the result in the lower element of "dst" using writemask "k" (the element is copied from "c" when mask bit 0 is not set), and copy the upper element from "c" to the upper element of "dst".</description>
+ <operation>
+IF k[0]
+ dst[63:0] := -(a[63:0] * b[63:0]) + c[63:0]
+ELSE
+ dst[63:0] := c[63:0]
+FI
+dst[127:64] := c[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VFNMADD132SD" form="xmm {k}, xmm, xmm" xed="VFNMADD132SD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <instruction name="VFNMADD213SD" form="xmm {k}, xmm, xmm" xed="VFNMADD213SD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <instruction name="VFNMADD231SD" form="xmm {k}, xmm, xmm" xed="VFNMADD231SD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_fnmadd_round_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="__m128d" varname="c" etype="FP64"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Multiply the lower double-precision (64-bit) floating-point elements in "a" and "b", and add the negated intermediate result to the lower element in "c". Store the result in the lower element of "dst" using writemask "k" (the element is copied from "a" when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst".
+ [round_note]</description>
+ <operation>
+IF k[0]
+ dst[63:0] := -(a[63:0] * b[63:0]) + c[63:0]
+ELSE
+ dst[63:0] := a[63:0]
+FI
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VFNMADD132SD" form="xmm {k}, xmm, xmm {er}" xed="VFNMADD132SD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <instruction name="VFNMADD213SD" form="xmm {k}, xmm, xmm {er}" xed="VFNMADD213SD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <instruction name="VFNMADD231SD" form="xmm {k}, xmm, xmm {er}" xed="VFNMADD231SD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_fnmadd_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="__m128d" varname="c" etype="FP64"/>
+ <description>Multiply the lower double-precision (64-bit) floating-point elements in "a" and "b", and add the negated intermediate result to the lower element in "c". Store the result in the lower element of "dst" using writemask "k" (the element is copied from "a" when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst".</description>
+ <operation>
+IF k[0]
+ dst[63:0] := -(a[63:0] * b[63:0]) + c[63:0]
+ELSE
+ dst[63:0] := a[63:0]
+FI
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VFNMADD132SD" form="xmm {k}, xmm, xmm" xed="VFNMADD132SD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <instruction name="VFNMADD213SD" form="xmm {k}, xmm, xmm" xed="VFNMADD213SD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <instruction name="VFNMADD231SD" form="xmm {k}, xmm, xmm" xed="VFNMADD231SD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_fnmadd_round_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="__m128d" varname="c" etype="FP64"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Multiply the lower double-precision (64-bit) floating-point elements in "a" and "b", and add the negated intermediate result to the lower element in "c". Store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst".
+ [round_note]</description>
+ <operation>
+IF k[0]
+ dst[63:0] := -(a[63:0] * b[63:0]) + c[63:0]
+ELSE
+ dst[63:0] := 0
+FI
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VFNMADD132SD" form="xmm {z}, xmm, xmm {er}" xed="VFNMADD132SD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <instruction name="VFNMADD213SD" form="xmm {z}, xmm, xmm {er}" xed="VFNMADD213SD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <instruction name="VFNMADD231SD" form="xmm {z}, xmm, xmm {er}" xed="VFNMADD231SD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_fnmadd_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="__m128d" varname="c" etype="FP64"/>
+ <description>Multiply the lower double-precision (64-bit) floating-point elements in "a" and "b", and add the negated intermediate result to the lower element in "c". Store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst".</description>
+ <operation>
+IF k[0]
+ dst[63:0] := -(a[63:0] * b[63:0]) + c[63:0]
+ELSE
+ dst[63:0] := 0
+FI
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VFNMADD213SD" form="xmm {z}, xmm, xmm" xed="VFNMADD213SD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <instruction name="VFNMADD231SD" form="xmm {z}, xmm, xmm" xed="VFNMADD231SD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <instruction name="VFNMADD132SD" form="xmm {z}, xmm, xmm" xed="VFNMADD132SD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_fnmadd_round_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="__m128" varname="c" etype="FP32"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Multiply the lower single-precision (32-bit) floating-point elements in "a" and "b", and add the negated intermediate result to the lower element in "c". Store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst".
+ [round_note]</description>
+ <operation>
+dst[31:0] := -(a[31:0] * b[31:0]) + c[31:0]
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VFNMADD132SS" form="xmm, xmm, xmm {er}" xed="VFNMADD132SS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <instruction name="VFNMADD213SS" form="xmm, xmm, xmm {er}" xed="VFNMADD213SS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <instruction name="VFNMADD231SS" form="xmm, xmm, xmm {er}" xed="VFNMADD231SS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask3_fnmadd_round_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="__m128" varname="c" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Multiply the lower single-precision (32-bit) floating-point elements in "a" and "b", and add the negated intermediate result to the lower element in "c". Store the result in the lower element of "dst" using writemask "k" (the element is copied from "c" when mask bit 0 is not set), and copy the upper 3 packed elements from "c" to the upper elements of "dst".
+ [round_note]</description>
+ <operation>
+IF k[0]
+ dst[31:0] := -(a[31:0] * b[31:0]) + c[31:0]
+ELSE
+ dst[31:0] := c[31:0]
+FI
+dst[127:32] := c[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VFNMADD132SS" form="xmm {k}, xmm, xmm {er}" xed="VFNMADD132SS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <instruction name="VFNMADD213SS" form="xmm {k}, xmm, xmm {er}" xed="VFNMADD213SS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <instruction name="VFNMADD231SS" form="xmm {k}, xmm, xmm {er}" xed="VFNMADD231SS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask3_fnmadd_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="__m128" varname="c" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <description>Multiply the lower single-precision (32-bit) floating-point elements in "a" and "b", and add the negated intermediate result to the lower element in "c". Store the result in the lower element of "dst" using writemask "k" (the element is copied from "c" when mask bit 0 is not set), and copy the upper 3 packed elements from "c" to the upper elements of "dst".</description>
+ <operation>
+IF k[0]
+ dst[31:0] := -(a[31:0] * b[31:0]) + c[31:0]
+ELSE
+ dst[31:0] := c[31:0]
+FI
+dst[127:32] := c[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VFNMADD132SS" form="xmm {k}, xmm, xmm" xed="VFNMADD132SS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <instruction name="VFNMADD213SS" form="xmm {k}, xmm, xmm" xed="VFNMADD213SS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <instruction name="VFNMADD231SS" form="xmm {k}, xmm, xmm" xed="VFNMADD231SS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_fnmadd_round_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="__m128" varname="c" etype="FP32"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Multiply the lower single-precision (32-bit) floating-point elements in "a" and "b", and add the negated intermediate result to the lower element in "c". Store the result in the lower element of "dst" using writemask "k" (the element is copied from "a" when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst".
+ [round_note]</description>
+ <operation>
+IF k[0]
+ dst[31:0] := -(a[31:0] * b[31:0]) + c[31:0]
+ELSE
+ dst[31:0] := a[31:0]
+FI
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VFNMADD132SS" form="xmm {k}, xmm, xmm {er}" xed="VFNMADD132SS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <instruction name="VFNMADD213SS" form="xmm {k}, xmm, xmm {er}" xed="VFNMADD213SS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <instruction name="VFNMADD231SS" form="xmm {k}, xmm, xmm {er}" xed="VFNMADD231SS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_fnmadd_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="__m128" varname="c" etype="FP32"/>
+ <description>Multiply the lower single-precision (32-bit) floating-point elements in "a" and "b", and add the negated intermediate result to the lower element in "c". Store the result in the lower element of "dst" using writemask "k" (the element is copied from "a" when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst".</description>
+ <operation>
+IF k[0]
+ dst[31:0] := -(a[31:0] * b[31:0]) + c[31:0]
+ELSE
+ dst[31:0] := a[31:0]
+FI
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VFNMADD132SS" form="xmm {k}, xmm, xmm" xed="VFNMADD132SS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <instruction name="VFNMADD213SS" form="xmm {k}, xmm, xmm" xed="VFNMADD213SS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <instruction name="VFNMADD231SS" form="xmm {k}, xmm, xmm" xed="VFNMADD231SS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_fnmadd_round_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="__m128" varname="c" etype="FP32"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Multiply the lower single-precision (32-bit) floating-point elements in "a" and "b", and add the negated intermediate result to the lower element in "c". Store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst".
+ [round_note]</description>
+ <operation>
+IF k[0]
+ dst[31:0] := -(a[31:0] * b[31:0]) + c[31:0]
+ELSE
+ dst[31:0] := 0
+FI
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VFNMADD132SS" form="xmm {z}, xmm, xmm {er}" xed="VFNMADD132SS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <instruction name="VFNMADD213SS" form="xmm {z}, xmm, xmm {er}" xed="VFNMADD213SS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <instruction name="VFNMADD231SS" form="xmm {z}, xmm, xmm {er}" xed="VFNMADD231SS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_fnmadd_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="__m128" varname="c" etype="FP32"/>
+ <description>Multiply the lower single-precision (32-bit) floating-point elements in "a" and "b", and add the negated intermediate result to the lower element in "c". Store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst".</description>
+ <operation>
+IF k[0]
+ dst[31:0] := -(a[31:0] * b[31:0]) + c[31:0]
+ELSE
+ dst[31:0] := 0
+FI
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VFNMADD132SS" form="xmm {z}, xmm, xmm" xed="VFNMADD132SS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <instruction name="VFNMADD213SS" form="xmm {z}, xmm, xmm" xed="VFNMADD213SS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <instruction name="VFNMADD231SS" form="xmm {z}, xmm, xmm" xed="VFNMADD231SS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_fnmsub_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <parameter type="__m512d" varname="c" etype="FP64"/>
+ <description>Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the negated intermediate result, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := -(a[i+63:i] * b[i+63:i]) - c[i+63:i]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VFNMSUB132PD" form="zmm {z}, zmm, zmm" xed="VFNMSUB132PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <instruction name="VFNMSUB213PD" form="zmm {z}, zmm, zmm" xed="VFNMSUB213PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <instruction name="VFNMSUB231PD" form="zmm {z}, zmm, zmm" xed="VFNMSUB231PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_fnmsub_round_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <parameter type="__m512d" varname="c" etype="FP64"/>
+ <parameter type="const int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the negated intermediate result, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). [round_note]</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := -(a[i+63:i] * b[i+63:i]) - c[i+63:i]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VFNMSUB132PD" form="zmm {z}, zmm, zmm {er}" xed="VFNMSUB132PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <instruction name="VFNMSUB213PD" form="zmm {z}, zmm, zmm {er}" xed="VFNMSUB213PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <instruction name="VFNMSUB231PD" form="zmm {z}, zmm, zmm {er}" xed="VFNMSUB231PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_fnmsub_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <parameter type="__m512" varname="c" etype="FP32"/>
+ <description>Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the negated intermediate result, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := -(a[i+31:i] * b[i+31:i]) - c[i+31:i]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VFNMSUB132PS" form="zmm {z}, zmm, zmm" xed="VFNMSUB132PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <instruction name="VFNMSUB213PS" form="zmm {z}, zmm, zmm" xed="VFNMSUB213PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <instruction name="VFNMSUB231PS" form="zmm {z}, zmm, zmm" xed="VFNMSUB231PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_fnmsub_round_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <parameter type="__m512" varname="c" etype="FP32"/>
+ <parameter type="const int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the negated intermediate result, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+ [round_note]</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := -(a[i+31:i] * b[i+31:i]) - c[i+31:i]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VFNMSUB132PS" form="zmm {z}, zmm, zmm {er}" xed="VFNMSUB132PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <instruction name="VFNMSUB213PS" form="zmm {z}, zmm, zmm {er}" xed="VFNMSUB213PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <instruction name="VFNMSUB231PS" form="zmm {z}, zmm, zmm {er}" xed="VFNMSUB231PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_fnmsub_round_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="__m128d" varname="c" etype="FP64"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Multiply the lower double-precision (64-bit) floating-point elements in "a" and "b", and subtract the lower element in "c" from the negated intermediate result. Store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst".
+ [round_note]</description>
+ <operation>
+dst[63:0] := -(a[63:0] * b[63:0]) - c[63:0]
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VFNMSUB132SD" form="xmm, xmm, xmm {er}" xed="VFNMSUB132SD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <instruction name="VFNMSUB213SD" form="xmm, xmm, xmm {er}" xed="VFNMSUB213SD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <instruction name="VFNMSUB231SD" form="xmm, xmm, xmm {er}" xed="VFNMSUB231SD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask3_fnmsub_round_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="__m128d" varname="c" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Multiply the lower double-precision (64-bit) floating-point elements in "a" and "b", and subtract the lower element in "c" from the negated intermediate result. Store the result in the lower element of "dst" using writemask "k" (the element is copied from "c" when mask bit 0 is not set), and copy the upper element from "c" to the upper element of "dst".
+ [round_note]</description>
+ <operation>
+IF k[0]
+ dst[63:0] := -(a[63:0] * b[63:0]) - c[63:0]
+ELSE
+ dst[63:0] := c[63:0]
+FI
+dst[127:64] := c[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VFNMSUB132SD" form="xmm {k}, xmm, xmm {er}" xed="VFNMSUB132SD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <instruction name="VFNMSUB213SD" form="xmm {k}, xmm, xmm {er}" xed="VFNMSUB213SD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <instruction name="VFNMSUB231SD" form="xmm {k}, xmm, xmm {er}" xed="VFNMSUB231SD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask3_fnmsub_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="__m128d" varname="c" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <description>Multiply the lower double-precision (64-bit) floating-point elements in "a" and "b", and subtract the lower element in "c" from the negated intermediate result. Store the result in the lower element of "dst" using writemask "k" (the element is copied from "c" when mask bit 0 is not set), and copy the upper element from "c" to the upper element of "dst".</description>
+ <operation>
+IF k[0]
+ dst[63:0] := -(a[63:0] * b[63:0]) - c[63:0]
+ELSE
+ dst[63:0] := c[63:0]
+FI
+dst[127:64] := c[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VFNMSUB132SD" form="xmm {k}, xmm, xmm" xed="VFNMSUB132SD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <instruction name="VFNMSUB213SD" form="xmm {k}, xmm, xmm" xed="VFNMSUB213SD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <instruction name="VFNMSUB231SD" form="xmm {k}, xmm, xmm" xed="VFNMSUB231SD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_fnmsub_round_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="__m128d" varname="c" etype="FP64"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Multiply the lower double-precision (64-bit) floating-point elements in "a" and "b", and subtract the lower element in "c" from the negated intermediate result. Store the result in the lower element of "dst" using writemask "k" (the element is copied from "a" when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst".
+ [round_note]</description>
+ <operation>
+IF k[0]
+ dst[63:0] := -(a[63:0] * b[63:0]) - c[63:0]
+ELSE
+ dst[63:0] := a[63:0]
+FI
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VFNMSUB132SD" form="xmm {k}, xmm, xmm {er}" xed="VFNMSUB132SD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <instruction name="VFNMSUB213SD" form="xmm {k}, xmm, xmm {er}" xed="VFNMSUB213SD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <instruction name="VFNMSUB231SD" form="xmm {k}, xmm, xmm {er}" xed="VFNMSUB231SD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_fnmsub_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="__m128d" varname="c" etype="FP64"/>
+ <description>Multiply the lower double-precision (64-bit) floating-point elements in "a" and "b", and subtract the lower element in "c" from the negated intermediate result. Store the result in the lower element of "dst" using writemask "k" (the element is copied from "a" when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst".</description>
+ <operation>
+IF k[0]
+ dst[63:0] := -(a[63:0] * b[63:0]) - c[63:0]
+ELSE
+ dst[63:0] := a[63:0]
+FI
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VFNMSUB132SD" form="xmm {k}, xmm, xmm" xed="VFNMSUB132SD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <instruction name="VFNMSUB213SD" form="xmm {k}, xmm, xmm" xed="VFNMSUB213SD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <instruction name="VFNMSUB231SD" form="xmm {k}, xmm, xmm" xed="VFNMSUB231SD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_fnmsub_round_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="__m128d" varname="c" etype="FP64"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Multiply the lower double-precision (64-bit) floating-point elements in "a" and "b", and subtract the lower element in "c" from the negated intermediate result. Store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst".
+ [round_note]</description>
+ <operation>
+IF k[0]
+ dst[63:0] := -(a[63:0] * b[63:0]) - c[63:0]
+ELSE
+ dst[63:0] := 0
+FI
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VFNMSUB132SD" form="xmm {z}, xmm, xmm {er}" xed="VFNMSUB132SD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <instruction name="VFNMSUB213SD" form="xmm {z}, xmm, xmm {er}" xed="VFNMSUB213SD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <instruction name="VFNMSUB231SD" form="xmm {z}, xmm, xmm {er}" xed="VFNMSUB231SD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_fnmsub_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="__m128d" varname="c" etype="FP64"/>
+ <description>Multiply the lower double-precision (64-bit) floating-point elements in "a" and "b", and subtract the lower element in "c" from the negated intermediate result. Store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst".</description>
+ <operation>
+IF k[0]
+ dst[63:0] := -(a[63:0] * b[63:0]) - c[63:0]
+ELSE
+ dst[63:0] := 0
+FI
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VFNMSUB132SD" form="xmm {z}, xmm, xmm" xed="VFNMSUB132SD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <instruction name="VFNMSUB213SD" form="xmm {z}, xmm, xmm" xed="VFNMSUB213SD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <instruction name="VFNMSUB231SD" form="xmm {z}, xmm, xmm" xed="VFNMSUB231SD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_fnmsub_round_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="__m128" varname="c" etype="FP32"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Multiply the lower single-precision (32-bit) floating-point elements in "a" and "b", subtract the lower element in "c" from the negated intermediate result, store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst".
+ [round_note]</description>
+ <operation>
+dst[31:0] := -(a[31:0] * b[31:0]) - c[31:0]
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VFNMSUB132SS" form="xmm, xmm, xmm {er}" xed="VFNMSUB132SS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <instruction name="VFNMSUB213SS" form="xmm, xmm, xmm {er}" xed="VFNMSUB213SS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <instruction name="VFNMSUB231SS" form="xmm, xmm, xmm {er}" xed="VFNMSUB231SS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask3_fnmsub_round_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="__m128" varname="c" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Multiply the lower single-precision (32-bit) floating-point elements in "a" and "b", subtract the lower element in "c" from the negated intermediate result. Store the result in the lower element of "dst" using writemask "k" (the element is copied from "c" when mask bit 0 is not set), and copy the upper 3 packed elements from "c" to the upper elements of "dst".
+ [round_note]</description>
+ <operation>
+IF k[0]
+ dst[31:0] := -(a[31:0] * b[31:0]) - c[31:0]
+ELSE
+ dst[31:0] := c[31:0]
+FI
+dst[127:32] := c[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VFNMSUB132SS" form="xmm {k}, xmm, xmm {er}" xed="VFNMSUB132SS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <instruction name="VFNMSUB213SS" form="xmm {k}, xmm, xmm {er}" xed="VFNMSUB213SS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <instruction name="VFNMSUB231SS" form="xmm {k}, xmm, xmm {er}" xed="VFNMSUB231SS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask3_fnmsub_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="__m128" varname="c" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <description>Multiply the lower single-precision (32-bit) floating-point elements in "a" and "b", and subtract the lower element in "c" from the negated intermediate result. Store the result in the lower element of "dst" using writemask "k" (the element is copied from "c" when mask bit 0 is not set), and copy the upper 3 packed elements from "c" to the upper elements of "dst".</description>
+ <operation>
+IF k[0]
+ dst[31:0] := -(a[31:0] * b[31:0]) - c[31:0]
+ELSE
+ dst[31:0] := c[31:0]
+FI
+dst[127:32] := c[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VFNMSUB132SS" form="xmm {k}, xmm, xmm" xed="VFNMSUB132SS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <instruction name="VFNMSUB213SS" form="xmm {k}, xmm, xmm" xed="VFNMSUB213SS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <instruction name="VFNMSUB231SS" form="xmm {k}, xmm, xmm" xed="VFNMSUB231SS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_fnmsub_round_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="__m128" varname="c" etype="FP32"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Multiply the lower single-precision (32-bit) floating-point elements in "a" and "b", and subtract the lower element in "c" from the negated intermediate result. Store the result in the lower element of "dst" using writemask "k" (the element is copied from "c" when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst".
+ [round_note]</description>
+ <operation>
+IF k[0]
+ dst[31:0] := -(a[31:0] * b[31:0]) - c[31:0]
+ELSE
+ dst[31:0] := a[31:0]
+FI
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VFNMSUB132SS" form="xmm {k}, xmm, xmm {er}" xed="VFNMSUB132SS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <instruction name="VFNMSUB213SS" form="xmm {k}, xmm, xmm {er}" xed="VFNMSUB213SS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <instruction name="VFNMSUB231SS" form="xmm {k}, xmm, xmm {er}" xed="VFNMSUB231SS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_fnmsub_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="__m128" varname="c" etype="FP32"/>
+ <description>Multiply the lower single-precision (32-bit) floating-point elements in "a" and "b", and subtract the lower element in "c" from the negated intermediate result. Store the result in the lower element of "dst" using writemask "k" (the element is copied from "c" when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst".</description>
+ <operation>
+IF k[0]
+ dst[31:0] := -(a[31:0] * b[31:0]) - c[31:0]
+ELSE
+ dst[31:0] := a[31:0]
+FI
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VFNMSUB132SS" form="xmm {k}, xmm, xmm" xed="VFNMSUB132SS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <instruction name="VFNMSUB213SS" form="xmm {k}, xmm, xmm" xed="VFNMSUB213SS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <instruction name="VFNMSUB231SS" form="xmm {k}, xmm, xmm" xed="VFNMSUB231SS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_fnmsub_round_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="__m128" varname="c" etype="FP32"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Multiply the lower single-precision (32-bit) floating-point elements in "a" and "b", and subtract the lower element in "c" from the negated intermediate result. Store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst".
+ [round_note]</description>
+ <operation>
+IF k[0]
+ dst[31:0] := -(a[31:0] * b[31:0]) - c[31:0]
+ELSE
+ dst[31:0] := 0
+FI
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VFNMSUB132SS" form="xmm {z}, xmm, xmm {er}" xed="VFNMSUB132SS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <instruction name="VFNMSUB213SS" form="xmm {z}, xmm, xmm {er}" xed="VFNMSUB213SS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <instruction name="VFNMSUB231SS" form="xmm {z}, xmm, xmm {er}" xed="VFNMSUB231SS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_fnmsub_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="__m128" varname="c" etype="FP32"/>
+ <description>Multiply the lower single-precision (32-bit) floating-point elements in "a" and "b", and subtract the lower element in "c" from the negated intermediate result. Store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst".</description>
+ <operation>
+IF k[0]
+ dst[31:0] := -(a[31:0] * b[31:0]) - c[31:0]
+ELSE
+ dst[31:0] := 0
+FI
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VFNMSUB132SS" form="xmm {z}, xmm, xmm" xed="VFNMSUB132SS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <instruction name="VFNMSUB213SS" form="xmm {z}, xmm, xmm" xed="VFNMSUB213SS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <instruction name="VFNMSUB231SS" form="xmm {z}, xmm, xmm" xed="VFNMSUB231SS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_i32gather_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Load</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m256i" varname="vindex" etype="SI32"/>
+ <parameter type="void const*" varname="base_addr" etype="FP64"/>
+ <parameter type="int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <description>Gather double-precision (64-bit) floating-point elements from memory using 32-bit indices. 64-bit elements are loaded from addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst". "scale" should be 1, 2, 4 or 8.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ m := j*32
+ addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
+ dst[i+63:i] := MEM[addr+63:addr]
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VGATHERDPD" form="zmm, vm32y" xed="VGATHERDPD_ZMMf64_MASKmskw_MEMf64_AVX512_VL512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_i32gather_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Load</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="vindex" etype="SI32"/>
+ <parameter type="void const*" varname="base_addr" etype="FP64"/>
+ <parameter type="int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <description>Gather double-precision (64-bit) floating-point elements from memory using 32-bit indices. 64-bit elements are loaded from addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ m := j*32
+ IF k[j]
+ addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
+ dst[i+63:i] := MEM[addr+63:addr]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VGATHERDPD" form="zmm {k}, vm32y" xed="VGATHERDPD_ZMMf64_MASKmskw_MEMf64_AVX512_VL512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_i64gather_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Load</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512i" varname="vindex" etype="SI64"/>
+ <parameter type="void const*" varname="base_addr" etype="FP64"/>
+ <parameter type="int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <description>Gather double-precision (64-bit) floating-point elements from memory using 64-bit indices. 64-bit elements are loaded from addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst". "scale" should be 1, 2, 4 or 8.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ m := j*64
+ addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8
+ dst[i+63:i] := MEM[addr+63:addr]
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VGATHERQPD" form="zmm, vm64z" xed="VGATHERQPD_ZMMf64_MASKmskw_MEMf64_AVX512_VL512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_i64gather_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Load</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="vindex" etype="SI64"/>
+ <parameter type="void const*" varname="base_addr" etype="FP64"/>
+ <parameter type="int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <description>Gather double-precision (64-bit) floating-point elements from memory using 64-bit indices. 64-bit elements are loaded from addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ m := j*64
+ IF k[j]
+ addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8
+ dst[i+63:i] := MEM[addr+63:addr]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VGATHERQPD" form="zmm {k}, vm64z" xed="VGATHERQPD_ZMMf64_MASKmskw_MEMf64_AVX512_VL512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_i64gather_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Load</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m512i" varname="vindex" etype="SI64"/>
+ <parameter type="void const*" varname="base_addr" etype="FP32"/>
+ <parameter type="int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <description>Gather single-precision (32-bit) floating-point elements from memory using 64-bit indices. 32-bit elements are loaded from addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst". "scale" should be 1, 2, 4 or 8.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ m := j*64
+ addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 4
+ dst[i+31:i] := MEM[addr+31:addr]
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VGATHERQPS" form="ymm, vm64z" xed="VGATHERQPS_YMMf32_MASKmskw_MEMf32_AVX512_VL512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_i64gather_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Load</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="vindex" etype="SI64"/>
+ <parameter type="void const*" varname="base_addr" etype="FP32"/>
+ <parameter type="int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <description>Gather single-precision (32-bit) floating-point elements from memory using 64-bit indices. 32-bit elements are loaded from addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ m := j*64
+ IF k[j]
+    addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 4
+ dst[i+31:i] := MEM[addr+31:addr]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VGATHERQPS" form="ymm {k}, vm64z" xed="VGATHERQPS_YMMf32_MASKmskw_MEMf32_AVX512_VL512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_getexp_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Convert the exponent of each packed double-precision (64-bit) floating-point element in "a" to a double-precision (64-bit) floating-point number representing the integer exponent, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). This intrinsic essentially calculates "floor(log2(x))" for each element.</description>
+ <operation>FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := ConvertExpFP64(a[i+63:i])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VGETEXPPD" form="zmm {z}, zmm" xed="VGETEXPPD_ZMMf64_MASKmskw_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_getexp_round_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Convert the exponent of each packed double-precision (64-bit) floating-point element in "a" to a double-precision (64-bit) floating-point number representing the integer exponent, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). This intrinsic essentially calculates "floor(log2(x))" for each element.
+ [sae_note]</description>
+ <operation>FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := ConvertExpFP64(a[i+63:i])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VGETEXPPD" form="zmm {z}, zmm {sae}" xed="VGETEXPPD_ZMMf64_MASKmskw_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_getexp_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Convert the exponent of each packed single-precision (32-bit) floating-point element in "a" to a single-precision (32-bit) floating-point number representing the integer exponent, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). This intrinsic essentially calculates "floor(log2(x))" for each element.</description>
+ <operation>FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := ConvertExpFP32(a[i+31:i])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VGETEXPPS" form="zmm {z}, zmm" xed="VGETEXPPS_ZMMf32_MASKmskw_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_getexp_round_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Convert the exponent of each packed single-precision (32-bit) floating-point element in "a" to a single-precision (32-bit) floating-point number representing the integer exponent, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). This intrinsic essentially calculates "floor(log2(x))" for each element.
+ [sae_note]</description>
+ <operation>FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := ConvertExpFP32(a[i+31:i])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VGETEXPPS" form="zmm {z}, zmm {sae}" xed="VGETEXPPS_ZMMf32_MASKmskw_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_getexp_round_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Convert the exponent of the lower double-precision (64-bit) floating-point element in "b" to a double-precision (64-bit) floating-point number representing the integer exponent, store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst". This intrinsic essentially calculates "floor(log2(x))" for the lower element.
+ [sae_note]</description>
+ <operation>dst[63:0] := ConvertExpFP64(b[63:0])
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VGETEXPSD" form="xmm, xmm, xmm {sae}" xed="VGETEXPSD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_getexp_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Convert the exponent of the lower double-precision (64-bit) floating-point element in "b" to a double-precision (64-bit) floating-point number representing the integer exponent, store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst". This intrinsic essentially calculates "floor(log2(x))" for the lower element.</description>
+ <operation>dst[63:0] := ConvertExpFP64(b[63:0])
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VGETEXPSD" form="xmm, xmm, xmm" xed="VGETEXPSD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_getexp_round_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Convert the exponent of the lower double-precision (64-bit) floating-point element in "b" to a double-precision (64-bit) floating-point number representing the integer exponent, store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". This intrinsic essentially calculates "floor(log2(x))" for the lower element.
+ [sae_note]</description>
+ <operation>IF k[0]
+ dst[63:0] := ConvertExpFP64(b[63:0])
+ELSE
+ dst[63:0] := src[63:0]
+FI
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VGETEXPSD" form="xmm {k}, xmm, xmm {sae}" xed="VGETEXPSD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_getexp_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Convert the exponent of the lower double-precision (64-bit) floating-point element in "b" to a double-precision (64-bit) floating-point number representing the integer exponent, store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". This intrinsic essentially calculates "floor(log2(x))" for the lower element.</description>
+ <operation>IF k[0]
+ dst[63:0] := ConvertExpFP64(b[63:0])
+ELSE
+ dst[63:0] := src[63:0]
+FI
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VGETEXPSD" form="xmm {k}, xmm, xmm" xed="VGETEXPSD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_getexp_round_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Convert the exponent of the lower double-precision (64-bit) floating-point element in "b" to a double-precision (64-bit) floating-point number representing the integer exponent, store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". This intrinsic essentially calculates "floor(log2(x))" for the lower element.
+ [sae_note]</description>
+ <operation>IF k[0]
+ dst[63:0] := ConvertExpFP64(b[63:0])
+ELSE
+ dst[63:0] := 0
+FI
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VGETEXPSD" form="xmm {z}, xmm, xmm {sae}" xed="VGETEXPSD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_getexp_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Convert the exponent of the lower double-precision (64-bit) floating-point element in "b" to a double-precision (64-bit) floating-point number representing the integer exponent, store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". This intrinsic essentially calculates "floor(log2(x))" for the lower element.</description>
+ <operation>IF k[0]
+ dst[63:0] := ConvertExpFP64(b[63:0])
+ELSE
+ dst[63:0] := 0
+FI
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VGETEXPSD" form="xmm {z}, xmm, xmm" xed="VGETEXPSD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_getexp_round_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Convert the exponent of the lower single-precision (32-bit) floating-point element in "b" to a single-precision (32-bit) floating-point number representing the integer exponent, store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". This intrinsic essentially calculates "floor(log2(x))" for the lower element.
+ [sae_note]</description>
+ <operation>dst[31:0] := ConvertExpFP32(b[31:0])
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VGETEXPSS" form="xmm, xmm, xmm {sae}" xed="VGETEXPSS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_getexp_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Convert the exponent of the lower single-precision (32-bit) floating-point element in "b" to a single-precision (32-bit) floating-point number representing the integer exponent, store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". This intrinsic essentially calculates "floor(log2(x))" for the lower element.</description>
+ <operation>dst[31:0] := ConvertExpFP32(b[31:0])
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VGETEXPSS" form="xmm, xmm, xmm" xed="VGETEXPSS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_getexp_round_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Convert the exponent of the lower single-precision (32-bit) floating-point element in "b" to a single-precision (32-bit) floating-point number representing the integer exponent, store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". This intrinsic essentially calculates "floor(log2(x))" for the lower element.
+ [sae_note]</description>
+ <operation>IF k[0]
+ dst[31:0] := ConvertExpFP32(b[31:0])
+ELSE
+ dst[31:0] := src[31:0]
+FI
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VGETEXPSS" form="xmm {k}, xmm, xmm {sae}" xed="VGETEXPSS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_getexp_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Convert the exponent of the lower single-precision (32-bit) floating-point element in "b" to a single-precision (32-bit) floating-point number representing the integer exponent, store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". This intrinsic essentially calculates "floor(log2(x))" for the lower element.</description>
+ <operation>IF k[0]
+ dst[31:0] := ConvertExpFP32(b[31:0])
+ELSE
+ dst[31:0] := src[31:0]
+FI
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VGETEXPSS" form="xmm {k}, xmm, xmm" xed="VGETEXPSS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_getexp_round_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Convert the exponent of the lower single-precision (32-bit) floating-point element in "b" to a single-precision (32-bit) floating-point number representing the integer exponent, store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". This intrinsic essentially calculates "floor(log2(x))" for the lower element.
+ [sae_note]</description>
+ <operation>IF k[0]
+ dst[31:0] := ConvertExpFP32(b[31:0])
+ELSE
+ dst[31:0] := 0
+FI
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VGETEXPSS" form="xmm {z}, xmm, xmm {sae}" xed="VGETEXPSS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_getexp_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Convert the exponent of the lower single-precision (32-bit) floating-point element in "b" to a single-precision (32-bit) floating-point number representing the integer exponent, store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". This intrinsic essentially calculates "floor(log2(x))" for the lower element.</description>
+ <operation>IF k[0]
+ dst[31:0] := ConvertExpFP32(b[31:0])
+ELSE
+ dst[31:0] := 0
+FI
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VGETEXPSS" form="xmm {z}, xmm, xmm" xed="VGETEXPSS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_getmant_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="_MM_MANTISSA_NORM_ENUM" varname="interv" etype="IMM" immtype="_MM_MANTISSA_NORM"/>
+ <parameter type="_MM_MANTISSA_SIGN_ENUM" varname="sc" etype="IMM" immtype="_MM_MANTISSA_SIGN"/>
+ <description>Normalize the mantissas of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). This intrinsic essentially calculates "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "interv" and the sign depends on "sc" and the source sign.
+ [getmant_note]</description>
+ <operation>FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := GetNormalizedMantissa(a[i+63:i], sc, interv)
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VGETMANTPD" form="zmm {z}, zmm, imm8" xed="VGETMANTPD_ZMMf64_MASKmskw_ZMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_getmant_round_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="_MM_MANTISSA_NORM_ENUM" varname="interv" etype="IMM" immtype="_MM_MANTISSA_NORM"/>
+ <parameter type="_MM_MANTISSA_SIGN_ENUM" varname="sc" etype="IMM" immtype="_MM_MANTISSA_SIGN"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Normalize the mantissas of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). This intrinsic essentially calculates "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "interv" and the sign depends on "sc" and the source sign.
+ [getmant_note][sae_note]</description>
+ <operation>FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := GetNormalizedMantissa(a[i+63:i], sc, interv)
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VGETMANTPD" form="zmm {z}, zmm, imm8 {sae}" xed="VGETMANTPD_ZMMf64_MASKmskw_ZMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_getmant_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="_MM_MANTISSA_NORM_ENUM" varname="interv" etype="IMM" immtype="_MM_MANTISSA_NORM"/>
+ <parameter type="_MM_MANTISSA_SIGN_ENUM" varname="sc" etype="IMM" immtype="_MM_MANTISSA_SIGN"/>
+ <description>Normalize the mantissas of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). This intrinsic essentially calculates "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "interv" and the sign depends on "sc" and the source sign.
+ [getmant_note]</description>
+ <operation>FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := GetNormalizedMantissa(a[i+31:i], sc, interv)
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VGETMANTPS" form="zmm {z}, zmm, imm8" xed="VGETMANTPS_ZMMf32_MASKmskw_ZMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_getmant_round_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="_MM_MANTISSA_NORM_ENUM" varname="interv" etype="IMM" immtype="_MM_MANTISSA_NORM"/>
+ <parameter type="_MM_MANTISSA_SIGN_ENUM" varname="sc" etype="IMM" immtype="_MM_MANTISSA_SIGN"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Normalize the mantissas of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). This intrinsic essentially calculates "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "interv" and the sign depends on "sc" and the source sign.
+ [getmant_note][sae_note]</description>
+ <operation>FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := GetNormalizedMantissa(a[i+31:i], sc, interv)
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VGETMANTPS" form="zmm {z}, zmm, imm8 {sae}" xed="VGETMANTPS_ZMMf32_MASKmskw_ZMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_getmant_round_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="_MM_MANTISSA_NORM_ENUM" varname="interv" etype="IMM" immtype="_MM_MANTISSA_NORM"/>
+ <parameter type="_MM_MANTISSA_SIGN_ENUM" varname="sc" etype="IMM" immtype="_MM_MANTISSA_SIGN"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Normalize the mantissas of the lower double-precision (64-bit) floating-point element in "b", store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst". This intrinsic essentially calculates "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "interv" and the sign depends on "sc" and the source sign.
+ [getmant_note][sae_note]</description>
+ <operation>dst[63:0] := GetNormalizedMantissa(b[63:0], sc, interv)
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VGETMANTSD" form="xmm, xmm, xmm, imm8 {sae}" xed="VGETMANTSD_XMMf64_MASKmskw_XMMf64_XMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_getmant_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="_MM_MANTISSA_NORM_ENUM" varname="interv" etype="IMM" immtype="_MM_MANTISSA_NORM"/>
+ <parameter type="_MM_MANTISSA_SIGN_ENUM" varname="sc" etype="IMM" immtype="_MM_MANTISSA_SIGN"/>
+ <description>Normalize the mantissas of the lower double-precision (64-bit) floating-point element in "b", store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst". This intrinsic essentially calculates "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "interv" and the sign depends on "sc" and the source sign.
+ [getmant_note]</description>
+ <operation>dst[63:0] := GetNormalizedMantissa(b[63:0], sc, interv)
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VGETMANTSD" form="xmm, xmm, xmm, imm8" xed="VGETMANTSD_XMMf64_MASKmskw_XMMf64_XMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_getmant_round_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="_MM_MANTISSA_NORM_ENUM" varname="interv" etype="IMM" immtype="_MM_MANTISSA_NORM"/>
+ <parameter type="_MM_MANTISSA_SIGN_ENUM" varname="sc" etype="IMM" immtype="_MM_MANTISSA_SIGN"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Normalize the mantissas of the lower double-precision (64-bit) floating-point element in "b", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". This intrinsic essentially calculates "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "interv" and the sign depends on "sc" and the source sign.
+ [getmant_note][sae_note]</description>
+ <operation>IF k[0]
+ dst[63:0] := GetNormalizedMantissa(b[63:0], sc, interv)
+ELSE
+ dst[63:0] := src[63:0]
+FI
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VGETMANTSD" form="xmm {k}, xmm, xmm, imm8 {sae}" xed="VGETMANTSD_XMMf64_MASKmskw_XMMf64_XMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_getmant_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="_MM_MANTISSA_NORM_ENUM" varname="interv" etype="IMM" immtype="_MM_MANTISSA_NORM"/>
+ <parameter type="_MM_MANTISSA_SIGN_ENUM" varname="sc" etype="IMM" immtype="_MM_MANTISSA_SIGN"/>
+ <description>Normalize the mantissas of the lower double-precision (64-bit) floating-point element in "b", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". This intrinsic essentially calculates "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "interv" and the sign depends on "sc" and the source sign.
+ [getmant_note]</description>
+ <operation>IF k[0]
+ dst[63:0] := GetNormalizedMantissa(b[63:0], sc, interv)
+ELSE
+ dst[63:0] := src[63:0]
+FI
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VGETMANTSD" form="xmm {k}, xmm, xmm, imm8" xed="VGETMANTSD_XMMf64_MASKmskw_XMMf64_XMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_getmant_round_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="_MM_MANTISSA_NORM_ENUM" varname="interv" etype="IMM" immtype="_MM_MANTISSA_NORM"/>
+ <parameter type="_MM_MANTISSA_SIGN_ENUM" varname="sc" etype="IMM" immtype="_MM_MANTISSA_SIGN"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Normalize the mantissas of the lower double-precision (64-bit) floating-point element in "b", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". This intrinsic essentially calculates "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "interv" and the sign depends on "sc" and the source sign.
+ [getmant_note][sae_note]</description>
+ <operation>IF k[0]
+ dst[63:0] := GetNormalizedMantissa(b[63:0], sc, interv)
+ELSE
+ dst[63:0] := 0
+FI
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VGETMANTSD" form="xmm {z}, xmm, xmm, imm8 {sae}" xed="VGETMANTSD_XMMf64_MASKmskw_XMMf64_XMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_getmant_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="_MM_MANTISSA_NORM_ENUM" varname="interv" etype="IMM" immtype="_MM_MANTISSA_NORM"/>
+ <parameter type="_MM_MANTISSA_SIGN_ENUM" varname="sc" etype="IMM" immtype="_MM_MANTISSA_SIGN"/>
+ <description>Normalize the mantissas of the lower double-precision (64-bit) floating-point element in "b", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". This intrinsic essentially calculates "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "interv" and the sign depends on "sc" and the source sign.
+ [getmant_note]</description>
+ <operation>IF k[0]
+ dst[63:0] := GetNormalizedMantissa(b[63:0], sc, interv)
+ELSE
+ dst[63:0] := 0
+FI
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VGETMANTSD" form="xmm {z}, xmm, xmm, imm8" xed="VGETMANTSD_XMMf64_MASKmskw_XMMf64_XMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_getmant_round_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="_MM_MANTISSA_NORM_ENUM" varname="interv" etype="IMM" immtype="_MM_MANTISSA_NORM"/>
+ <parameter type="_MM_MANTISSA_SIGN_ENUM" varname="sc" etype="IMM" immtype="_MM_MANTISSA_SIGN"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Normalize the mantissas of the lower single-precision (32-bit) floating-point element in "b", store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". This intrinsic essentially calculates "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "interv" and the sign depends on "sc" and the source sign.
+ [getmant_note][sae_note]</description>
+ <operation>dst[31:0] := GetNormalizedMantissa(b[31:0], sc, interv)
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VGETMANTSS" form="xmm, xmm, xmm, imm8 {sae}" xed="VGETMANTSS_XMMf32_MASKmskw_XMMf32_XMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_getmant_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="_MM_MANTISSA_NORM_ENUM" varname="interv" etype="IMM" immtype="_MM_MANTISSA_NORM"/>
+ <parameter type="_MM_MANTISSA_SIGN_ENUM" varname="sc" etype="IMM" immtype="_MM_MANTISSA_SIGN"/>
+ <description>Normalize the mantissas of the lower single-precision (32-bit) floating-point element in "b", store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". This intrinsic essentially calculates "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "interv" and the sign depends on "sc" and the source sign.
+ [getmant_note]</description>
+ <operation>dst[31:0] := GetNormalizedMantissa(b[31:0], sc, interv)
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VGETMANTSS" form="xmm, xmm, xmm, imm8" xed="VGETMANTSS_XMMf32_MASKmskw_XMMf32_XMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_getmant_round_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="_MM_MANTISSA_NORM_ENUM" varname="interv" etype="IMM" immtype="_MM_MANTISSA_NORM"/>
+ <parameter type="_MM_MANTISSA_SIGN_ENUM" varname="sc" etype="IMM" immtype="_MM_MANTISSA_SIGN"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Normalize the mantissas of the lower single-precision (32-bit) floating-point element in "b", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". This intrinsic essentially calculates "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "interv" and the sign depends on "sc" and the source sign.
+ [getmant_note][sae_note]</description>
+ <operation>IF k[0]
+ dst[31:0] := GetNormalizedMantissa(b[31:0], sc, interv)
+ELSE
+ dst[31:0] := src[31:0]
+FI
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VGETMANTSS" form="xmm {k}, xmm, xmm, imm8 {sae}" xed="VGETMANTSS_XMMf32_MASKmskw_XMMf32_XMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_getmant_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="_MM_MANTISSA_NORM_ENUM" varname="interv" etype="IMM" immtype="_MM_MANTISSA_NORM"/>
+ <parameter type="_MM_MANTISSA_SIGN_ENUM" varname="sc" etype="IMM" immtype="_MM_MANTISSA_SIGN"/>
+ <description>Normalize the mantissas of the lower single-precision (32-bit) floating-point element in "b", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". This intrinsic essentially calculates "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "interv" and the sign depends on "sc" and the source sign.
+ [getmant_note]</description>
+ <operation>IF k[0]
+ dst[31:0] := GetNormalizedMantissa(b[31:0], sc, interv)
+ELSE
+ dst[31:0] := src[31:0]
+FI
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VGETMANTSS" form="xmm {k}, xmm, xmm, imm8" xed="VGETMANTSS_XMMf32_MASKmskw_XMMf32_XMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_getmant_round_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="_MM_MANTISSA_NORM_ENUM" varname="interv" etype="IMM" immtype="_MM_MANTISSA_NORM"/>
+ <parameter type="_MM_MANTISSA_SIGN_ENUM" varname="sc" etype="IMM" immtype="_MM_MANTISSA_SIGN"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Normalize the mantissas of the lower single-precision (32-bit) floating-point element in "b", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". This intrinsic essentially calculates "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "interv" and the sign depends on "sc" and the source sign.
+ [getmant_note][sae_note]</description>
+ <operation>IF k[0]
+ dst[31:0] := GetNormalizedMantissa(b[31:0], sc, interv)
+ELSE
+ dst[31:0] := 0
+FI
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VGETMANTSS" form="xmm {z}, xmm, xmm, imm8 {sae}" xed="VGETMANTSS_XMMf32_MASKmskw_XMMf32_XMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_getmant_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="_MM_MANTISSA_NORM_ENUM" varname="interv" etype="IMM" immtype="_MM_MANTISSA_NORM"/>
+ <parameter type="_MM_MANTISSA_SIGN_ENUM" varname="sc" etype="IMM" immtype="_MM_MANTISSA_SIGN"/>
+ <description>Normalize the mantissas of the lower single-precision (32-bit) floating-point element in "b", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". This intrinsic essentially calculates "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "interv" and the sign depends on "sc" and the source sign.
+ [getmant_note]</description>
+ <operation>IF k[0]
+ dst[31:0] := GetNormalizedMantissa(b[31:0], sc, interv)
+ELSE
+ dst[31:0] := 0
+FI
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VGETMANTSS" form="xmm {z}, xmm, xmm, imm8" xed="VGETMANTSS_XMMf32_MASKmskw_XMMf32_XMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_insertf32x4">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="2"/>
+ <description>Copy "a" to "dst", then insert 128 bits (composed of 4 packed single-precision (32-bit) floating-point elements) from "b" into "dst" at the location specified by "imm8".</description>
+ <operation>
+dst[511:0] := a[511:0]
+CASE (imm8[1:0]) OF
+0: dst[127:0] := b[127:0]
+1: dst[255:128] := b[127:0]
+2: dst[383:256] := b[127:0]
+3: dst[511:384] := b[127:0]
+ESAC
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VINSERTF32X4" form="zmm, zmm, xmm, imm8" xed="VINSERTF32X4_ZMMf32_MASKmskw_ZMMf32_XMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_insertf32x4">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="2"/>
+ <description>Copy "a" to "tmp", then insert 128 bits (composed of 4 packed single-precision (32-bit) floating-point elements) from "b" into "tmp" at the location specified by "imm8". Store "tmp" to "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+tmp[511:0] := a[511:0]
+CASE (imm8[1:0]) OF
+0: tmp[127:0] := b[127:0]
+1: tmp[255:128] := b[127:0]
+2: tmp[383:256] := b[127:0]
+3: tmp[511:384] := b[127:0]
+ESAC
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := tmp[i+31:i]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VINSERTF32X4" form="zmm {k}, zmm, xmm, imm8" xed="VINSERTF32X4_ZMMf32_MASKmskw_ZMMf32_XMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_insertf32x4">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="2"/>
+ <description>Copy "a" to "tmp", then insert 128 bits (composed of 4 packed single-precision (32-bit) floating-point elements) from "b" into "tmp" at the location specified by "imm8". Store "tmp" to "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+tmp[511:0] := a[511:0]
+CASE (imm8[1:0]) OF
+0: tmp[127:0] := b[127:0]
+1: tmp[255:128] := b[127:0]
+2: tmp[383:256] := b[127:0]
+3: tmp[511:384] := b[127:0]
+ESAC
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := tmp[i+31:i]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VINSERTF32X4" form="zmm {z}, zmm, xmm, imm8" xed="VINSERTF32X4_ZMMf32_MASKmskw_ZMMf32_XMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_insertf64x4">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m256d" varname="b" etype="FP64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="1"/>
+ <description>Copy "a" to "dst", then insert 256 bits (composed of 4 packed double-precision (64-bit) floating-point elements) from "b" into "dst" at the location specified by "imm8".</description>
+ <operation>
+dst[511:0] := a[511:0]
+CASE (imm8[0]) OF
+0: dst[255:0] := b[255:0]
+1: dst[511:256] := b[255:0]
+ESAC
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VINSERTF64X4" form="zmm, zmm, ymm, imm8" xed="VINSERTF64X4_ZMMf64_MASKmskw_ZMMf64_YMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_insertf64x4">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m256d" varname="b" etype="FP64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="1"/>
+ <description>Copy "a" to "tmp", then insert 256 bits (composed of 4 packed double-precision (64-bit) floating-point elements) from "b" into "tmp" at the location specified by "imm8". Store "tmp" to "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+tmp[511:0] := a[511:0]
+CASE (imm8[0]) OF
+0: tmp[255:0] := b[255:0]
+1: tmp[511:256] := b[255:0]
+ESAC
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := tmp[i+63:i]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VINSERTF64X4" form="zmm {k}, zmm, ymm, imm8" xed="VINSERTF64X4_ZMMf64_MASKmskw_ZMMf64_YMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_insertf64x4">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m256d" varname="b" etype="FP64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="1"/>
+ <description>Copy "a" to "tmp", then insert 256 bits (composed of 4 packed double-precision (64-bit) floating-point elements) from "b" into "tmp" at the location specified by "imm8". Store "tmp" to "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+tmp[511:0] := a[511:0]
+CASE (imm8[0]) OF
+0: tmp[255:0] := b[255:0]
+1: tmp[511:256] := b[255:0]
+ESAC
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := tmp[i+63:i]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VINSERTF64X4" form="zmm {z}, zmm, ymm, imm8" xed="VINSERTF64X4_ZMMf64_MASKmskw_ZMMf64_YMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_inserti32x4">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="b" etype="UI32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="2"/>
+ <description>Copy "a" to "dst", then insert 128 bits (composed of 4 packed 32-bit integers) from "b" into "dst" at the location specified by "imm8".</description>
+ <operation>
+dst[511:0] := a[511:0]
+CASE (imm8[1:0]) OF
+0: dst[127:0] := b[127:0]
+1: dst[255:128] := b[127:0]
+2: dst[383:256] := b[127:0]
+3: dst[511:384] := b[127:0]
+ESAC
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VINSERTI32X4" form="zmm, zmm, xmm, imm8" xed="VINSERTI32X4_ZMMu32_MASKmskw_ZMMu32_XMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_inserti32x4">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="src" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="b" etype="UI32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="2"/>
+ <description>Copy "a" to "tmp", then insert 128 bits (composed of 4 packed 32-bit integers) from "b" into "tmp" at the location specified by "imm8". Store "tmp" to "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+tmp[511:0] := a[511:0]
+CASE (imm8[1:0]) OF
+0: tmp[127:0] := b[127:0]
+1: tmp[255:128] := b[127:0]
+2: tmp[383:256] := b[127:0]
+3: tmp[511:384] := b[127:0]
+ESAC
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := tmp[i+31:i]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VINSERTI32X4" form="zmm {k}, zmm, xmm, imm8" xed="VINSERTI32X4_ZMMu32_MASKmskw_ZMMu32_XMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_inserti32x4">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="b" etype="UI32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="2"/>
+ <description>Copy "a" to "tmp", then insert 128 bits (composed of 4 packed 32-bit integers) from "b" into "tmp" at the location specified by "imm8". Store "tmp" to "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+tmp[511:0] := a[511:0]
+CASE (imm8[1:0]) OF
+0: tmp[127:0] := b[127:0]
+1: tmp[255:128] := b[127:0]
+2: tmp[383:256] := b[127:0]
+3: tmp[511:384] := b[127:0]
+ESAC
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := tmp[i+31:i]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VINSERTI32X4" form="zmm {z}, zmm, xmm, imm8" xed="VINSERTI32X4_ZMMu32_MASKmskw_ZMMu32_XMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_inserti64x4">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="__m256i" varname="b" etype="UI64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="1"/>
+ <description>Copy "a" to "dst", then insert 256 bits (composed of 4 packed 64-bit integers) from "b" into "dst" at the location specified by "imm8".</description>
+ <operation>
+dst[511:0] := a[511:0]
+CASE (imm8[0]) OF
+0: dst[255:0] := b[255:0]
+1: dst[511:256] := b[255:0]
+ESAC
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VINSERTI64X4" form="zmm, zmm, ymm, imm8" xed="VINSERTI64X4_ZMMu64_MASKmskw_ZMMu64_YMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_inserti64x4">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="__m256i" varname="b" etype="UI64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="1"/>
+ <description>Copy "a" to "tmp", then insert 256 bits (composed of 4 packed 64-bit integers) from "b" into "tmp" at the location specified by "imm8". Store "tmp" to "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+tmp[511:0] := a[511:0]
+CASE (imm8[0]) OF
+0: tmp[255:0] := b[255:0]
+1: tmp[511:256] := b[255:0]
+ESAC
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := tmp[i+63:i]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VINSERTI64X4" form="zmm {k}, zmm, ymm, imm8" xed="VINSERTI64X4_ZMMu64_MASKmskw_ZMMu64_YMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_inserti64x4">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="__m256i" varname="b" etype="UI64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="1"/>
+ <description>Copy "a" to "tmp", then insert 256 bits (composed of 4 packed 64-bit integers) from "b" into "tmp" at the location specified by "imm8". Store "tmp" to "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+tmp[511:0] := a[511:0]
+CASE (imm8[0]) OF
+0: tmp[255:0] := b[255:0]
+1: tmp[511:256] := b[255:0]
+ESAC
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := tmp[i+63:i]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VINSERTI64X4" form="zmm {z}, zmm, ymm, imm8" xed="VINSERTI64X4_ZMMu64_MASKmskw_ZMMu64_YMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_max_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <description>Compare packed double-precision (64-bit) floating-point elements in "a" and "b", and store packed maximum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := MAX(a[i+63:i], b[i+63:i])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VMAXPD" form="zmm {k}, zmm, zmm" xed="VMAXPD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_max_round_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Compare packed double-precision (64-bit) floating-point elements in "a" and "b", and store packed maximum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). [sae_note]</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := MAX(a[i+63:i], b[i+63:i])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VMAXPD" form="zmm {k}, zmm, zmm {sae}" xed="VMAXPD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_max_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <description>Compare packed double-precision (64-bit) floating-point elements in "a" and "b", and store packed maximum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := MAX(a[i+63:i], b[i+63:i])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VMAXPD" form="zmm {z}, zmm, zmm" xed="VMAXPD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_max_round_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Compare packed double-precision (64-bit) floating-point elements in "a" and "b", and store packed maximum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). [sae_note]</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := MAX(a[i+63:i], b[i+63:i])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VMAXPD" form="zmm {z}, zmm, zmm {sae}" xed="VMAXPD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_max_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <description>Compare packed double-precision (64-bit) floating-point elements in "a" and "b", and store packed maximum values in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ dst[i+63:i] := MAX(a[i+63:i], b[i+63:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VMAXPD" form="zmm, zmm, zmm" xed="VMAXPD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_max_round_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Compare packed double-precision (64-bit) floating-point elements in "a" and "b", and store packed maximum values in "dst". [sae_note]</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ dst[i+63:i] := MAX(a[i+63:i], b[i+63:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VMAXPD" form="zmm, zmm, zmm {sae}" xed="VMAXPD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_max_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <description>Compare packed single-precision (32-bit) floating-point elements in "a" and "b", and store packed maximum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := MAX(a[i+31:i], b[i+31:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VMAXPS" form="zmm {k}, zmm, zmm" xed="VMAXPS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_max_round_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Compare packed single-precision (32-bit) floating-point elements in "a" and "b", and store packed maximum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). [sae_note]</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := MAX(a[i+31:i], b[i+31:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VMAXPS" form="zmm {k}, zmm, zmm {sae}" xed="VMAXPS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_max_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <description>Compare packed single-precision (32-bit) floating-point elements in "a" and "b", and store packed maximum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := MAX(a[i+31:i], b[i+31:i])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VMAXPS" form="zmm {z}, zmm, zmm" xed="VMAXPS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_max_round_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Compare packed single-precision (32-bit) floating-point elements in "a" and "b", and store packed maximum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). [sae_note]</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := MAX(a[i+31:i], b[i+31:i])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VMAXPS" form="zmm {z}, zmm, zmm {sae}" xed="VMAXPS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_max_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <description>Compare packed single-precision (32-bit) floating-point elements in "a" and "b", and store packed maximum values in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := MAX(a[i+31:i], b[i+31:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VMAXPS" form="zmm, zmm, zmm" xed="VMAXPS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_max_round_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Compare packed single-precision (32-bit) floating-point elements in "a" and "b", and store packed maximum values in "dst". [sae_note]</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := MAX(a[i+31:i], b[i+31:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VMAXPS" form="zmm, zmm, zmm {sae}" xed="VMAXPS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_max_round_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Compare the lower double-precision (64-bit) floating-point elements in "a" and "b", store the maximum value in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". [sae_note]</description>
+ <operation>
+IF k[0]
+ dst[63:0] := MAX(a[63:0], b[63:0])
+ELSE
+ dst[63:0] := src[63:0]
+FI
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VMAXSD" form="xmm {k}, xmm, xmm {sae}" xed="VMAXSD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_max_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Compare the lower double-precision (64-bit) floating-point elements in "a" and "b", store the maximum value in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst".</description>
+ <operation>
+IF k[0]
+ dst[63:0] := MAX(a[63:0], b[63:0])
+ELSE
+ dst[63:0] := src[63:0]
+FI
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VMAXSD" form="xmm {k}, xmm, xmm" xed="VMAXSD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_max_round_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Compare the lower double-precision (64-bit) floating-point elements in "a" and "b", store the maximum value in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". [sae_note]</description>
+ <operation>
+IF k[0]
+ dst[63:0] := MAX(a[63:0], b[63:0])
+ELSE
+ dst[63:0] := 0
+FI
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VMAXSD" form="xmm {z}, xmm, xmm {sae}" xed="VMAXSD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_max_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Compare the lower double-precision (64-bit) floating-point elements in "a" and "b", store the maximum value in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst".</description>
+ <operation>
+IF k[0]
+ dst[63:0] := MAX(a[63:0], b[63:0])
+ELSE
+ dst[63:0] := 0
+FI
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VMAXSD" form="xmm {z}, xmm, xmm" xed="VMAXSD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_max_round_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Compare the lower double-precision (64-bit) floating-point elements in "a" and "b", store the maximum value in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst". [sae_note]</description>
+ <operation>
+dst[63:0] := MAX(a[63:0], b[63:0])
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VMAXSD" form="xmm, xmm, xmm {sae}" xed="VMAXSD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_max_round_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Compare the lower single-precision (32-bit) floating-point elements in "a" and "b", store the maximum value in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". [sae_note]</description>
+ <operation>
+IF k[0]
+ dst[31:0] := MAX(a[31:0], b[31:0])
+ELSE
+ dst[31:0] := src[31:0]
+FI
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VMAXSS" form="xmm {k}, xmm, xmm {sae}" xed="VMAXSS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_max_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Compare the lower single-precision (32-bit) floating-point elements in "a" and "b", store the maximum value in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst".</description>
+ <operation>
+IF k[0]
+ dst[31:0] := MAX(a[31:0], b[31:0])
+ELSE
+ dst[31:0] := src[31:0]
+FI
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VMAXSS" form="xmm {k}, xmm, xmm" xed="VMAXSS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_max_round_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Compare the lower single-precision (32-bit) floating-point elements in "a" and "b", store the maximum value in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". [sae_note]</description>
+ <operation>
+IF k[0]
+ dst[31:0] := MAX(a[31:0], b[31:0])
+ELSE
+ dst[31:0] := 0
+FI
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VMAXSS" form="xmm {z}, xmm, xmm {sae}" xed="VMAXSS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_max_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Compare the lower single-precision (32-bit) floating-point elements in "a" and "b", store the maximum value in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst".</description>
+ <operation>
+IF k[0]
+ dst[31:0] := MAX(a[31:0], b[31:0])
+ELSE
+ dst[31:0] := 0
+FI
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VMAXSS" form="xmm {z}, xmm, xmm" xed="VMAXSS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_max_round_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Compare the lower single-precision (32-bit) floating-point elements in "a" and "b", store the maximum value in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". [sae_note]</description>
+ <operation>
+dst[31:0] := MAX(a[31:0], b[31:0])
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VMAXSS" form="xmm, xmm, xmm {sae}" xed="VMAXSS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_min_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <description>Compare packed double-precision (64-bit) floating-point elements in "a" and "b", and store packed minimum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := MIN(a[i+63:i], b[i+63:i])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VMINPD" form="zmm {k}, zmm, zmm" xed="VMINPD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_min_round_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Compare packed double-precision (64-bit) floating-point elements in "a" and "b", and store packed minimum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). [sae_note]</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := MIN(a[i+63:i], b[i+63:i])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VMINPD" form="zmm {k}, zmm, zmm {sae}" xed="VMINPD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_min_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <description>Compare packed double-precision (64-bit) floating-point elements in "a" and "b", and store packed minimum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := MIN(a[i+63:i], b[i+63:i])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VMINPD" form="zmm {z}, zmm, zmm" xed="VMINPD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_min_round_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Compare packed double-precision (64-bit) floating-point elements in "a" and "b", and store packed minimum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). [sae_note]</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := MIN(a[i+63:i], b[i+63:i])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VMINPD" form="zmm {z}, zmm, zmm {sae}" xed="VMINPD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_min_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <description>Compare packed double-precision (64-bit) floating-point elements in "a" and "b", and store packed minimum values in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ dst[i+63:i] := MIN(a[i+63:i], b[i+63:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VMINPD" form="zmm, zmm, zmm" xed="VMINPD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_min_round_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Compare packed double-precision (64-bit) floating-point elements in "a" and "b", and store packed minimum values in "dst". [sae_note]</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ dst[i+63:i] := MIN(a[i+63:i], b[i+63:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VMINPD" form="zmm, zmm, zmm {sae}" xed="VMINPD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_min_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <description>Compare packed single-precision (32-bit) floating-point elements in "a" and "b", and store packed minimum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := MIN(a[i+31:i], b[i+31:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VMINPS" form="zmm {k}, zmm, zmm" xed="VMINPS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_min_round_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Compare packed single-precision (32-bit) floating-point elements in "a" and "b", and store packed minimum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). [sae_note]</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := MIN(a[i+31:i], b[i+31:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VMINPS" form="zmm {k}, zmm, zmm {sae}" xed="VMINPS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_min_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <description>Compare packed single-precision (32-bit) floating-point elements in "a" and "b", and store packed minimum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := MIN(a[i+31:i], b[i+31:i])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VMINPS" form="zmm {z}, zmm, zmm" xed="VMINPS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_min_round_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Compare packed single-precision (32-bit) floating-point elements in "a" and "b", and store packed minimum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). [sae_note]</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := MIN(a[i+31:i], b[i+31:i])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VMINPS" form="zmm {z}, zmm, zmm {sae}" xed="VMINPS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_min_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <description>Compare packed single-precision (32-bit) floating-point elements in "a" and "b", and store packed minimum values in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := MIN(a[i+31:i], b[i+31:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VMINPS" form="zmm, zmm, zmm" xed="VMINPS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_min_round_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Compare packed single-precision (32-bit) floating-point elements in "a" and "b", and store packed minimum values in "dst". [sae_note]</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := MIN(a[i+31:i], b[i+31:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VMINPS" form="zmm, zmm, zmm {sae}" xed="VMINPS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_min_round_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Compare the lower double-precision (64-bit) floating-point elements in "a" and "b", store the minimum value in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". [sae_note]</description>
+ <operation>
+IF k[0]
+ dst[63:0] := MIN(a[63:0], b[63:0])
+ELSE
+ dst[63:0] := src[63:0]
+FI
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VMINSD" form="xmm {k}, xmm, xmm {sae}" xed="VMINSD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_min_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Compare the lower double-precision (64-bit) floating-point elements in "a" and "b", store the minimum value in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst".</description>
+ <operation>
+IF k[0]
+ dst[63:0] := MIN(a[63:0], b[63:0])
+ELSE
+ dst[63:0] := src[63:0]
+FI
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VMINSD" form="xmm {k}, xmm, xmm" xed="VMINSD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_min_round_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Compare the lower double-precision (64-bit) floating-point elements in "a" and "b", store the minimum value in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". [sae_note]</description>
+ <operation>
+IF k[0]
+ dst[63:0] := MIN(a[63:0], b[63:0])
+ELSE
+ dst[63:0] := 0
+FI
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VMINSD" form="xmm {z}, xmm, xmm {sae}" xed="VMINSD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_min_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Compare the lower double-precision (64-bit) floating-point elements in "a" and "b", store the minimum value in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst".</description>
+ <operation>
+IF k[0]
+ dst[63:0] := MIN(a[63:0], b[63:0])
+ELSE
+ dst[63:0] := 0
+FI
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VMINSD" form="xmm {z}, xmm, xmm" xed="VMINSD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_min_round_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Compare the lower double-precision (64-bit) floating-point elements in "a" and "b", store the minimum value in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst". [sae_note]</description>
+ <operation>
+dst[63:0] := MIN(a[63:0], b[63:0])
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VMINSD" form="xmm, xmm, xmm {sae}" xed="VMINSD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_min_round_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Compare the lower single-precision (32-bit) floating-point elements in "a" and "b", store the minimum value in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". [sae_note]</description>
+ <operation>
+IF k[0]
+ dst[31:0] := MIN(a[31:0], b[31:0])
+ELSE
+ dst[31:0] := src[31:0]
+FI
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VMINSS" form="xmm {k}, xmm, xmm {sae}" xed="VMINSS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_min_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Compare the lower single-precision (32-bit) floating-point elements in "a" and "b", store the minimum value in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst".</description>
+ <operation>
+IF k[0]
+ dst[31:0] := MIN(a[31:0], b[31:0])
+ELSE
+ dst[31:0] := src[31:0]
+FI
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VMINSS" form="xmm {k}, xmm, xmm" xed="VMINSS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_min_round_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Compare the lower single-precision (32-bit) floating-point elements in "a" and "b", store the minimum value in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". [sae_note]</description>
+ <operation>
+IF k[0]
+ dst[31:0] := MIN(a[31:0], b[31:0])
+ELSE
+ dst[31:0] := 0
+FI
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VMINSS" form="xmm {z}, xmm, xmm {sae}" xed="VMINSS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_min_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Compare the lower single-precision (32-bit) floating-point elements in "a" and "b", store the minimum value in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst".</description>
+ <operation>
+IF k[0]
+ dst[31:0] := MIN(a[31:0], b[31:0])
+ELSE
+ dst[31:0] := 0
+FI
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VMINSS" form="xmm {z}, xmm, xmm" xed="VMINSS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_min_round_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Compare the lower single-precision (32-bit) floating-point elements in "a" and "b", store the minimum value in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". [sae_note]</description>
+ <operation>
+dst[31:0] := MIN(a[31:0], b[31:0])
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VMINSS" form="xmm, xmm, xmm {sae}" xed="VMINSS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_load_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Load</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="void const*" varname="mem_addr" etype="FP64" memwidth="512"/>
+ <description>Load packed double-precision (64-bit) floating-point elements from memory into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). "mem_addr" must be aligned on a 64-byte boundary or a general-protection exception may be generated.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := MEM[mem_addr+i+63:mem_addr+i]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VMOVAPD" form="zmm {z}, m512" xed="VMOVAPD_ZMMf64_MASKmskw_MEMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_mov_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Move</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Move packed double-precision (64-bit) floating-point elements from "a" into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := a[i+63:i]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VMOVAPD" form="zmm {z}, zmm" xed="VMOVAPD_ZMMf64_MASKmskw_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_load_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Load</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="void const*" varname="mem_addr" etype="FP32" memwidth="512"/>
+ <description>Load packed single-precision (32-bit) floating-point elements from memory into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). "mem_addr" must be aligned on a 64-byte boundary or a general-protection exception may be generated.</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := MEM[mem_addr+i+31:mem_addr+i]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VMOVAPS" form="zmm {z}, m512" xed="VMOVAPS_ZMMf32_MASKmskw_MEMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_mov_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Move</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Move packed single-precision (32-bit) floating-point elements from "a" into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := a[i+31:i]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VMOVAPS" form="zmm {z}, zmm" xed="VMOVAPS_ZMMf32_MASKmskw_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_movedup_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Move</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Duplicate even-indexed double-precision (64-bit) floating-point elements from "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+tmp[63:0] := a[63:0]
+tmp[127:64] := a[63:0]
+tmp[191:128] := a[191:128]
+tmp[255:192] := a[191:128]
+tmp[319:256] := a[319:256]
+tmp[383:320] := a[319:256]
+tmp[447:384] := a[447:384]
+tmp[511:448] := a[447:384]
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := tmp[i+63:i]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VMOVDDUP" form="zmm {k}, zmm" xed="VMOVDDUP_ZMMf64_MASKmskw_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_movedup_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Move</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Duplicate even-indexed double-precision (64-bit) floating-point elements from "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+tmp[63:0] := a[63:0]
+tmp[127:64] := a[63:0]
+tmp[191:128] := a[191:128]
+tmp[255:192] := a[191:128]
+tmp[319:256] := a[319:256]
+tmp[383:320] := a[319:256]
+tmp[447:384] := a[447:384]
+tmp[511:448] := a[447:384]
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := tmp[i+63:i]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VMOVDDUP" form="zmm {z}, zmm" xed="VMOVDDUP_ZMMf64_MASKmskw_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_movedup_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Move</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Duplicate even-indexed double-precision (64-bit) floating-point elements from "a", and store the results in "dst".</description>
+ <operation>
+dst[63:0] := a[63:0]
+dst[127:64] := a[63:0]
+dst[191:128] := a[191:128]
+dst[255:192] := a[191:128]
+dst[319:256] := a[319:256]
+dst[383:320] := a[319:256]
+dst[447:384] := a[447:384]
+dst[511:448] := a[447:384]
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VMOVDDUP" form="zmm, zmm" xed="VMOVDDUP_ZMMf64_MASKmskw_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_load_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Load</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="void const*" varname="mem_addr" etype="UI32" memwidth="512"/>
+ <description>Load packed 32-bit integers from memory into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+ "mem_addr" must be aligned on a 64-byte boundary or a general-protection exception may be generated.</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := MEM[mem_addr+i+31:mem_addr+i]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VMOVDQA32" form="zmm {z}, m512" xed="VMOVDQA32_ZMMu32_MASKmskw_MEMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_mov_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Move</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <description>Move packed 32-bit integers from "a" into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := a[i+31:i]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VMOVDQA32" form="zmm {z}, zmm" xed="VMOVDQA32_ZMMu32_MASKmskw_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_load_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Load</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="void const*" varname="mem_addr" etype="UI64" memwidth="512"/>
+ <description>Load packed 64-bit integers from memory into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+ "mem_addr" must be aligned on a 64-byte boundary or a general-protection exception may be generated.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := MEM[mem_addr+i+63:mem_addr+i]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VMOVDQA64" form="zmm {z}, m512" xed="VMOVDQA64_ZMMu64_MASKmskw_MEMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_mov_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Move</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <description>Move packed 64-bit integers from "a" into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := a[i+63:i]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VMOVDQA64" form="zmm {z}, zmm" xed="VMOVDQA64_ZMMu64_MASKmskw_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_loadu_si512">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Load</category>
+ <return type="__m512i" varname="dst" etype="M512"/>
+ <parameter type="void const*" varname="mem_addr" etype="UI64" memwidth="512"/>
+ <description>Load 512-bits of integer data from memory into "dst".
+ "mem_addr" does not need to be aligned on any particular boundary.</description>
+ <operation>
+dst[511:0] := MEM[mem_addr+511:mem_addr]
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VMOVDQU32" form="zmm, m512" xed="VMOVDQU32_ZMMu32_MASKmskw_MEMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_loadu_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Load</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="src" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="void const*" varname="mem_addr" etype="UI32" memwidth="512"/>
+ <description>Load packed 32-bit integers from memory into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+ "mem_addr" does not need to be aligned on any particular boundary.</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := MEM[mem_addr+i+31:mem_addr+i]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VMOVDQU32" form="zmm {k}, m512" xed="VMOVDQU32_ZMMu32_MASKmskw_MEMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_storeu_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="mem_addr" etype="UI32" memwidth="512"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <description>Store packed 32-bit integers from "a" into memory using writemask "k".
+ "mem_addr" does not need to be aligned on any particular boundary.</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ MEM[mem_addr+i+31:mem_addr+i] := a[i+31:i]
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VMOVDQU32" form="m512 {k}, zmm" xed="VMOVDQU32_MEMu32_MASKmskw_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_loadu_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Load</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="void const*" varname="mem_addr" etype="UI32" memwidth="512"/>
+ <description>Load packed 32-bit integers from memory into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+ "mem_addr" does not need to be aligned on any particular boundary.</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := MEM[mem_addr+i+31:mem_addr+i]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VMOVDQU32" form="zmm {z}, m512" xed="VMOVDQU32_ZMMu32_MASKmskw_MEMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_storeu_si512">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="mem_addr" etype="M512" memwidth="512"/>
+ <parameter type="__m512i" varname="a" etype="M512"/>
+ <description>Store 512-bits of integer data from "a" into memory.
+ "mem_addr" does not need to be aligned on any particular boundary.</description>
+ <operation>
+MEM[mem_addr+511:mem_addr] := a[511:0]
+ </operation>
+ <instruction name="VMOVDQU32" form="m512, zmm" xed="VMOVDQU32_MEMu32_MASKmskw_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_loadu_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Load</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="void const*" varname="mem_addr" etype="UI64" memwidth="512"/>
+ <description>Load packed 64-bit integers from memory into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+ "mem_addr" does not need to be aligned on any particular boundary.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := MEM[mem_addr+i+63:mem_addr+i]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VMOVDQU64" form="zmm {k}, m512" xed="VMOVDQU64_ZMMu64_MASKmskw_MEMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_storeu_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="mem_addr" etype="UI64" memwidth="512"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <description>Store packed 64-bit integers from "a" into memory using writemask "k".
+ "mem_addr" does not need to be aligned on any particular boundary.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ MEM[mem_addr+i+63:mem_addr+i] := a[i+63:i]
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VMOVDQU64" form="m512 {k}, zmm" xed="VMOVDQU64_MEMu64_MASKmskw_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_loadu_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Load</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="void const*" varname="mem_addr" etype="UI64" memwidth="512"/>
+ <description>Load packed 64-bit integers from memory into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+ "mem_addr" does not need to be aligned on any particular boundary.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := MEM[mem_addr+i+63:mem_addr+i]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VMOVDQU64" form="zmm {z}, m512" xed="VMOVDQU64_ZMMu64_MASKmskw_MEMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_stream_load_si512">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Load</category>
+ <return type="__m512i" varname="dst" etype="M512"/>
+ <parameter type="void const*" varname="mem_addr" etype="M512" memwidth="512"/>
+ <description>Load 512-bits of integer data from memory into "dst" using a non-temporal memory hint.
+ "mem_addr" must be aligned on a 64-byte boundary or a general-protection exception may be generated.</description>
+ <operation>
+dst[511:0] := MEM[mem_addr+511:mem_addr]
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VMOVNTDQA" form="zmm, m512" xed="VMOVNTDQA_ZMMu32_MEMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_stream_si512">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="mem_addr" etype="M512" memwidth="512"/>
+ <parameter type="__m512i" varname="a" etype="M512"/>
+ <description>Store 512-bits of integer data from "a" into memory using a non-temporal memory hint.
+ "mem_addr" must be aligned on a 64-byte boundary or a general-protection exception may be generated.</description>
+ <operation>
+MEM[mem_addr+511:mem_addr] := a[511:0]
+ </operation>
+ <instruction name="VMOVNTDQ" form="m512, zmm" xed="VMOVNTDQ_MEMu32_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_stream_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="mem_addr" etype="FP64" memwidth="512"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Store 512-bits (composed of 8 packed double-precision (64-bit) floating-point elements) from "a" into memory using a non-temporal memory hint.
+ "mem_addr" must be aligned on a 64-byte boundary or a general-protection exception may be generated.</description>
+ <operation>
+MEM[mem_addr+511:mem_addr] := a[511:0]
+ </operation>
+ <instruction name="VMOVNTPD" form="m512, zmm" xed="VMOVNTPD_MEMf64_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_stream_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="mem_addr" etype="FP32" memwidth="512"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Store 512-bits (composed of 16 packed single-precision (32-bit) floating-point elements) from "a" into memory using a non-temporal memory hint.
+ "mem_addr" must be aligned on a 64-byte boundary or a general-protection exception may be generated.</description>
+ <operation>
+MEM[mem_addr+511:mem_addr] := a[511:0]
+ </operation>
+ <instruction name="VMOVNTPS" form="m512, zmm" xed="VMOVNTPS_MEMf32_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_load_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Load</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="const double*" varname="mem_addr" etype="FP64" memwidth="64"/>
+ <description>Load a double-precision (64-bit) floating-point element from memory into the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and set the upper element of "dst" to zero. "mem_addr" must be aligned on a 16-byte boundary or a general-protection exception may be generated.</description>
+ <operation>
+IF k[0]
+ dst[63:0] := MEM[mem_addr+63:mem_addr]
+ELSE
+ dst[63:0] := src[63:0]
+FI
+dst[MAX:64] := 0
+ </operation>
+ <instruction name="VMOVSD" form="xmm {k}, m64" xed="VMOVSD_XMMf64_MASKmskw_MEMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_move_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Move</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Move the lower double-precision (64-bit) floating-point element from "b" to the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst".</description>
+ <operation>
+IF k[0]
+ dst[63:0] := b[63:0]
+ELSE
+ dst[63:0] := src[63:0]
+FI
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VMOVSD" form="xmm {k}, xmm, xmm" xed="VMOVSD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_store_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="double*" varname="mem_addr" etype="FP64" memwidth="64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Store the lower double-precision (64-bit) floating-point element from "a" into memory using writemask "k".
+ "mem_addr" must be aligned on a 16-byte boundary or a general-protection exception may be generated.</description>
+ <operation>
+IF k[0]
+ MEM[mem_addr+63:mem_addr] := a[63:0]
+FI
+ </operation>
+ <instruction name="VMOVSD" form="m64 {k}, xmm" xed="VMOVSD_MEMf64_MASKmskw_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_load_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Load</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="const double*" varname="mem_addr" etype="FP64" memwidth="64"/>
+ <description>Load a double-precision (64-bit) floating-point element from memory into the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and set the upper element of "dst" to zero. "mem_addr" must be aligned on a 16-byte boundary or a general-protection exception may be generated.</description>
+ <operation>
+IF k[0]
+ dst[63:0] := MEM[mem_addr+63:mem_addr]
+ELSE
+ dst[63:0] := 0
+FI
+dst[MAX:64] := 0
+ </operation>
+ <instruction name="VMOVSD" form="xmm {z}, m64" xed="VMOVSD_XMMf64_MASKmskw_MEMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_move_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Move</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Move the lower double-precision (64-bit) floating-point element from "b" to the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst".</description>
+ <operation>
+IF k[0]
+ dst[63:0] := b[63:0]
+ELSE
+ dst[63:0] := 0
+FI
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VMOVSD" form="xmm {z}, xmm, xmm" xed="VMOVSD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_movehdup_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Move</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Duplicate odd-indexed single-precision (32-bit) floating-point elements from "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+tmp[31:0] := a[63:32]
+tmp[63:32] := a[63:32]
+tmp[95:64] := a[127:96]
+tmp[127:96] := a[127:96]
+tmp[159:128] := a[191:160]
+tmp[191:160] := a[191:160]
+tmp[223:192] := a[255:224]
+tmp[255:224] := a[255:224]
+tmp[287:256] := a[319:288]
+tmp[319:288] := a[319:288]
+tmp[351:320] := a[383:352]
+tmp[383:352] := a[383:352]
+tmp[415:384] := a[447:416]
+tmp[447:416] := a[447:416]
+tmp[479:448] := a[511:480]
+tmp[511:480] := a[511:480]
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := tmp[i+31:i]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VMOVSHDUP" form="zmm {k}, zmm" xed="VMOVSHDUP_ZMMf32_MASKmskw_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_movehdup_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Move</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Duplicate odd-indexed single-precision (32-bit) floating-point elements from "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+tmp[31:0] := a[63:32]
+tmp[63:32] := a[63:32]
+tmp[95:64] := a[127:96]
+tmp[127:96] := a[127:96]
+tmp[159:128] := a[191:160]
+tmp[191:160] := a[191:160]
+tmp[223:192] := a[255:224]
+tmp[255:224] := a[255:224]
+tmp[287:256] := a[319:288]
+tmp[319:288] := a[319:288]
+tmp[351:320] := a[383:352]
+tmp[383:352] := a[383:352]
+tmp[415:384] := a[447:416]
+tmp[447:416] := a[447:416]
+tmp[479:448] := a[511:480]
+tmp[511:480] := a[511:480]
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := tmp[i+31:i]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VMOVSHDUP" form="zmm {z}, zmm" xed="VMOVSHDUP_ZMMf32_MASKmskw_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_movehdup_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Move</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Duplicate odd-indexed single-precision (32-bit) floating-point elements from "a", and store the results in "dst".</description>
+ <operation>
+dst[31:0] := a[63:32]
+dst[63:32] := a[63:32]
+dst[95:64] := a[127:96]
+dst[127:96] := a[127:96]
+dst[159:128] := a[191:160]
+dst[191:160] := a[191:160]
+dst[223:192] := a[255:224]
+dst[255:224] := a[255:224]
+dst[287:256] := a[319:288]
+dst[319:288] := a[319:288]
+dst[351:320] := a[383:352]
+dst[383:352] := a[383:352]
+dst[415:384] := a[447:416]
+dst[447:416] := a[447:416]
+dst[479:448] := a[511:480]
+dst[511:480] := a[511:480]
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VMOVSHDUP" form="zmm, zmm" xed="VMOVSHDUP_ZMMf32_MASKmskw_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_moveldup_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Move</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Duplicate even-indexed single-precision (32-bit) floating-point elements from "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+tmp[31:0] := a[31:0]
+tmp[63:32] := a[31:0]
+tmp[95:64] := a[95:64]
+tmp[127:96] := a[95:64]
+tmp[159:128] := a[159:128]
+tmp[191:160] := a[159:128]
+tmp[223:192] := a[223:192]
+tmp[255:224] := a[223:192]
+tmp[287:256] := a[287:256]
+tmp[319:288] := a[287:256]
+tmp[351:320] := a[351:320]
+tmp[383:352] := a[351:320]
+tmp[415:384] := a[415:384]
+tmp[447:416] := a[415:384]
+tmp[479:448] := a[479:448]
+tmp[511:480] := a[479:448]
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := tmp[i+31:i]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VMOVSLDUP" form="zmm {k}, zmm" xed="VMOVSLDUP_ZMMf32_MASKmskw_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_moveldup_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Move</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Duplicate even-indexed single-precision (32-bit) floating-point elements from "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+tmp[31:0] := a[31:0]
+tmp[63:32] := a[31:0]
+tmp[95:64] := a[95:64]
+tmp[127:96] := a[95:64]
+tmp[159:128] := a[159:128]
+tmp[191:160] := a[159:128]
+tmp[223:192] := a[223:192]
+tmp[255:224] := a[223:192]
+tmp[287:256] := a[287:256]
+tmp[319:288] := a[287:256]
+tmp[351:320] := a[351:320]
+tmp[383:352] := a[351:320]
+tmp[415:384] := a[415:384]
+tmp[447:416] := a[415:384]
+tmp[479:448] := a[479:448]
+tmp[511:480] := a[479:448]
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := tmp[i+31:i]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VMOVSLDUP" form="zmm {z}, zmm" xed="VMOVSLDUP_ZMMf32_MASKmskw_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_moveldup_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Move</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Duplicate even-indexed single-precision (32-bit) floating-point elements from "a", and store the results in "dst".</description>
+ <operation>
+dst[31:0] := a[31:0]
+dst[63:32] := a[31:0]
+dst[95:64] := a[95:64]
+dst[127:96] := a[95:64]
+dst[159:128] := a[159:128]
+dst[191:160] := a[159:128]
+dst[223:192] := a[223:192]
+dst[255:224] := a[223:192]
+dst[287:256] := a[287:256]
+dst[319:288] := a[287:256]
+dst[351:320] := a[351:320]
+dst[383:352] := a[351:320]
+dst[415:384] := a[415:384]
+dst[447:416] := a[415:384]
+dst[479:448] := a[479:448]
+dst[511:480] := a[479:448]
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VMOVSLDUP" form="zmm, zmm" xed="VMOVSLDUP_ZMMf32_MASKmskw_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_load_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Load</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="const float*" varname="mem_addr" etype="FP32" memwidth="32"/>
+ <description>Load a single-precision (32-bit) floating-point element from memory into the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and set the upper elements of "dst" to zero. "mem_addr" must be aligned on a 16-byte boundary or a general-protection exception may be generated.</description>
+ <operation>
+IF k[0]
+ dst[31:0] := MEM[mem_addr+31:mem_addr]
+ELSE
+ dst[31:0] := src[31:0]
+FI
+dst[MAX:32] := 0
+ </operation>
+ <instruction name="VMOVSS" form="xmm {k}, m32" xed="VMOVSS_XMMf32_MASKmskw_MEMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_move_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Move</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Move the lower single-precision (32-bit) floating-point element from "b" to the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst".</description>
+ <operation>
+IF k[0]
+ dst[31:0] := b[31:0]
+ELSE
+ dst[31:0] := src[31:0]
+FI
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VMOVSS" form="xmm {k}, xmm, xmm" xed="VMOVSS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_store_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="float*" varname="mem_addr" etype="FP32" memwidth="32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Store the lower single-precision (32-bit) floating-point element from "a" into memory using writemask "k".
+ "mem_addr" must be aligned on a 16-byte boundary or a general-protection exception may be generated.</description>
+ <operation>
+IF k[0]
+ MEM[mem_addr+31:mem_addr] := a[31:0]
+FI
+ </operation>
+ <instruction name="VMOVSS" form="m32 {k}, xmm" xed="VMOVSS_MEMf32_MASKmskw_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_load_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Load</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="const float*" varname="mem_addr" etype="FP32" memwidth="32"/>
+ <description>Load a single-precision (32-bit) floating-point element from memory into the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and set the upper elements of "dst" to zero. "mem_addr" must be aligned on a 16-byte boundary or a general-protection exception may be generated.</description>
+ <operation>
+IF k[0]
+ dst[31:0] := MEM[mem_addr+31:mem_addr]
+ELSE
+ dst[31:0] := 0
+FI
+dst[MAX:32] := 0
+ </operation>
+ <instruction name="VMOVSS" form="xmm {z}, m32" xed="VMOVSS_XMMf32_MASKmskw_MEMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_move_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Move</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Move the lower single-precision (32-bit) floating-point element from "b" to the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst".</description>
+ <operation>
+IF k[0]
+ dst[31:0] := b[31:0]
+ELSE
+ dst[31:0] := 0
+FI
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VMOVSS" form="xmm {z}, xmm, xmm" xed="VMOVSS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_loadu_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Load</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="void const*" varname="mem_addr" etype="FP64" memwidth="512"/>
+ <description>Load 512-bits (composed of 8 packed double-precision (64-bit) floating-point elements) from memory into "dst".
+ "mem_addr" does not need to be aligned on any particular boundary.</description>
+ <operation>
+dst[511:0] := MEM[mem_addr+511:mem_addr]
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VMOVUPD" form="zmm, m512" xed="VMOVUPD_ZMMf64_MASKmskw_MEMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_loadu_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Load</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="void const*" varname="mem_addr" etype="FP64" memwidth="512"/>
+ <description>Load packed double-precision (64-bit) floating-point elements from memory into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+	"mem_addr" does not need to be aligned on any particular boundary.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := MEM[mem_addr+i+63:mem_addr+i]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VMOVUPD" form="zmm {k}, m512" xed="VMOVUPD_ZMMf64_MASKmskw_MEMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_storeu_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="mem_addr" etype="FP64" memwidth="512"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Store packed double-precision (64-bit) floating-point elements from "a" into memory using writemask "k".
+ "mem_addr" does not need to be aligned on any particular boundary.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ MEM[mem_addr+i+63:mem_addr+i] := a[i+63:i]
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VMOVUPD" form="m512 {k}, zmm" xed="VMOVUPD_MEMf64_MASKmskw_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_loadu_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Load</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="void const*" varname="mem_addr" etype="FP64" memwidth="512"/>
+ <description>Load packed double-precision (64-bit) floating-point elements from memory into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+	"mem_addr" does not need to be aligned on any particular boundary.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := MEM[mem_addr+i+63:mem_addr+i]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VMOVUPD" form="zmm {z}, m512" xed="VMOVUPD_ZMMf64_MASKmskw_MEMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_storeu_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="mem_addr" etype="FP64" memwidth="512"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Store 512-bits (composed of 8 packed double-precision (64-bit) floating-point elements) from "a" into memory.
+ "mem_addr" does not need to be aligned on any particular boundary.</description>
+ <operation>
+MEM[mem_addr+511:mem_addr] := a[511:0]
+ </operation>
+ <instruction name="VMOVUPD" form="m512, zmm" xed="VMOVUPD_MEMf64_MASKmskw_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_loadu_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Load</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="void const*" varname="mem_addr" etype="FP32" memwidth="512"/>
+ <description>Load 512-bits (composed of 16 packed single-precision (32-bit) floating-point elements) from memory into "dst".
+ "mem_addr" does not need to be aligned on any particular boundary.</description>
+ <operation>
+dst[511:0] := MEM[mem_addr+511:mem_addr]
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VMOVUPS" form="zmm, m512" xed="VMOVUPS_ZMMf32_MASKmskw_MEMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_loadu_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Load</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="void const*" varname="mem_addr" etype="FP32" memwidth="512"/>
+ <description>Load packed single-precision (32-bit) floating-point elements from memory into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+ "mem_addr" does not need to be aligned on any particular boundary.</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := MEM[mem_addr+i+31:mem_addr+i]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VMOVUPS" form="zmm {k}, m512" xed="VMOVUPS_ZMMf32_MASKmskw_MEMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_storeu_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="mem_addr" etype="FP32" memwidth="512"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Store packed single-precision (32-bit) floating-point elements from "a" into memory using writemask "k".
+ "mem_addr" does not need to be aligned on any particular boundary.</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ MEM[mem_addr+i+31:mem_addr+i] := a[i+31:i]
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VMOVUPS" form="m512 {k}, zmm" xed="VMOVUPS_MEMf32_MASKmskw_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_loadu_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Load</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="void const*" varname="mem_addr" etype="FP32" memwidth="512"/>
+ <description>Load packed single-precision (32-bit) floating-point elements from memory into "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+ "mem_addr" does not need to be aligned on any particular boundary.</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := MEM[mem_addr+i+31:mem_addr+i]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VMOVUPS" form="zmm {z}, m512" xed="VMOVUPS_ZMMf32_MASKmskw_MEMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_storeu_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="mem_addr" etype="FP32" memwidth="512"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Store 512-bits (composed of 16 packed single-precision (32-bit) floating-point elements) from "a" into memory.
+ "mem_addr" does not need to be aligned on any particular boundary.</description>
+ <operation>
+MEM[mem_addr+511:mem_addr] := a[511:0]
+ </operation>
+ <instruction name="VMOVUPS" form="m512, zmm" xed="VMOVUPS_MEMf32_MASKmskw_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_mul_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <description>Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := a[i+63:i] * b[i+63:i]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VMULPD" form="zmm {z}, zmm, zmm" xed="VMULPD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_mul_round_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+ [round_note]</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := a[i+63:i] * b[i+63:i]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VMULPD" form="zmm {z}, zmm, zmm {er}" xed="VMULPD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_mul_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <description>Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := a[i+31:i] * b[i+31:i]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VMULPS" form="zmm {z}, zmm, zmm" xed="VMULPS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_mul_round_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+ [round_note]</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := a[i+31:i] * b[i+31:i]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VMULPS" form="zmm {z}, zmm, zmm {er}" xed="VMULPS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_mul_round_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Multiply the lower double-precision (64-bit) floating-point element in "a" and "b", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst".
+ [round_note]</description>
+ <operation>
+IF k[0]
+ dst[63:0] := a[63:0] * b[63:0]
+ELSE
+ dst[63:0] := src[63:0]
+FI
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VMULSD" form="xmm {k}, xmm, xmm {er}" xed="VMULSD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_mul_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Multiply the lower double-precision (64-bit) floating-point element in "a" and "b", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst".</description>
+ <operation>
+IF k[0]
+ dst[63:0] := a[63:0] * b[63:0]
+ELSE
+ dst[63:0] := src[63:0]
+FI
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VMULSD" form="xmm {k}, xmm, xmm" xed="VMULSD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_mul_round_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Multiply the lower double-precision (64-bit) floating-point element in "a" and "b", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst".
+ [round_note]</description>
+ <operation>
+IF k[0]
+ dst[63:0] := a[63:0] * b[63:0]
+ELSE
+ dst[63:0] := 0
+FI
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VMULSD" form="xmm {z}, xmm, xmm {er}" xed="VMULSD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_mul_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Multiply the lower double-precision (64-bit) floating-point element in "a" and "b", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst".</description>
+ <operation>
+IF k[0]
+ dst[63:0] := a[63:0] * b[63:0]
+ELSE
+ dst[63:0] := 0
+FI
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VMULSD" form="xmm {z}, xmm, xmm" xed="VMULSD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mul_round_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Multiply the lower double-precision (64-bit) floating-point element in "a" and "b", store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst".
+ [round_note]</description>
+ <operation>
+dst[63:0] := a[63:0] * b[63:0]
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VMULSD" form="xmm, xmm, xmm {er}" xed="VMULSD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_mul_round_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Multiply the lower single-precision (32-bit) floating-point element in "a" and "b", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst".
+ [round_note]</description>
+ <operation>
+IF k[0]
+ dst[31:0] := a[31:0] * b[31:0]
+ELSE
+ dst[31:0] := src[31:0]
+FI
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VMULSS" form="xmm {k}, xmm, xmm {er}" xed="VMULSS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_mul_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Multiply the lower single-precision (32-bit) floating-point element in "a" and "b", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst".</description>
+ <operation>
+IF k[0]
+ dst[31:0] := a[31:0] * b[31:0]
+ELSE
+ dst[31:0] := src[31:0]
+FI
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VMULSS" form="xmm {k}, xmm, xmm" xed="VMULSS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_mul_round_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Multiply the lower single-precision (32-bit) floating-point element in "a" and "b", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst".
+ [round_note]</description>
+ <operation>
+IF k[0]
+ dst[31:0] := a[31:0] * b[31:0]
+ELSE
+ dst[31:0] := 0
+FI
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VMULSS" form="xmm {z}, xmm, xmm {er}" xed="VMULSS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_mul_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Multiply the lower single-precision (32-bit) floating-point element in "a" and "b", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst".</description>
+ <operation>
+IF k[0]
+ dst[31:0] := a[31:0] * b[31:0]
+ELSE
+ dst[31:0] := 0
+FI
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VMULSS" form="xmm {z}, xmm, xmm" xed="VMULSS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mul_round_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Multiply the lower single-precision (32-bit) floating-point element in "a" and "b", store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst".
+ [round_note]</description>
+ <operation>
+dst[31:0] := a[31:0] * b[31:0]
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VMULSS" form="xmm, xmm, xmm {er}" xed="VMULSS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_abs_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="a" etype="SI32"/>
+ <description>Compute the absolute value of packed signed 32-bit integers in "a", and store the unsigned results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := ABS(a[i+31:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPABSD" form="zmm, zmm" xed="VPABSD_ZMMi32_MASKmskw_ZMMi32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_abs_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="src" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI32"/>
+ <description>Compute the absolute value of packed signed 32-bit integers in "a", and store the unsigned results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := ABS(a[i+31:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPABSD" form="zmm {k}, zmm" xed="VPABSD_ZMMi32_MASKmskw_ZMMi32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_abs_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI32"/>
+ <description>Compute the absolute value of packed signed 32-bit integers in "a", and store the unsigned results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := ABS(a[i+31:i])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPABSD" form="zmm {z}, zmm" xed="VPABSD_ZMMi32_MASKmskw_ZMMi32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_abs_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="a" etype="SI64"/>
+ <description>Compute the absolute value of packed signed 64-bit integers in "a", and store the unsigned results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ dst[i+63:i] := ABS(a[i+63:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPABSQ" form="zmm, zmm" xed="VPABSQ_ZMMi64_MASKmskw_ZMMi64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_abs_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI64"/>
+ <description>Compute the absolute value of packed signed 64-bit integers in "a", and store the unsigned results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := ABS(a[i+63:i])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPABSQ" form="zmm {k}, zmm" xed="VPABSQ_ZMMi64_MASKmskw_ZMMi64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_abs_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI64"/>
+ <description>Compute the absolute value of packed signed 64-bit integers in "a", and store the unsigned results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := ABS(a[i+63:i])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPABSQ" form="zmm {z}, zmm" xed="VPABSQ_ZMMi64_MASKmskw_ZMMi64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_add_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="__m512i" varname="b" etype="UI32"/>
+ <description>Add packed 32-bit integers in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := a[i+31:i] + b[i+31:i]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPADDD" form="zmm {z}, zmm, zmm" xed="VPADDD_ZMMu32_MASKmskw_ZMMu32_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_add_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="__m512i" varname="b" etype="UI64"/>
+ <description>Add packed 64-bit integers in "a" and "b", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ dst[i+63:i] := a[i+63:i] + b[i+63:i]
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPADDQ" form="zmm, zmm, zmm" xed="VPADDQ_ZMMu64_MASKmskw_ZMMu64_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_add_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="__m512i" varname="b" etype="UI64"/>
+ <description>Add packed 64-bit integers in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := a[i+63:i] + b[i+63:i]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPADDQ" form="zmm {k}, zmm, zmm" xed="VPADDQ_ZMMu64_MASKmskw_ZMMu64_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_add_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="__m512i" varname="b" etype="UI64"/>
+ <description>Add packed 64-bit integers in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := a[i+63:i] + b[i+63:i]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPADDQ" form="zmm {z}, zmm, zmm" xed="VPADDQ_ZMMu64_MASKmskw_ZMMu64_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_and_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Logical</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="__m512i" varname="b" etype="UI32"/>
+ <description>Compute the bitwise AND of packed 32-bit integers in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := a[i+31:i] AND b[i+31:i]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPANDD" form="zmm {z}, zmm, zmm" xed="VPANDD_ZMMu32_MASKmskw_ZMMu32_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_andnot_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Logical</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="__m512i" varname="b" etype="UI32"/>
+ <description>Compute the bitwise NOT of packed 32-bit integers in "a" and then AND with "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := (NOT a[i+31:i]) AND b[i+31:i]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPANDND" form="zmm {z}, zmm, zmm" xed="VPANDND_ZMMu32_MASKmskw_ZMMu32_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_andnot_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Logical</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="__m512i" varname="b" etype="UI64"/>
+ <description>Compute the bitwise NOT of packed 64-bit integers in "a" and then AND with "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := (NOT a[i+63:i]) AND b[i+63:i]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPANDNQ" form="zmm {z}, zmm, zmm" xed="VPANDNQ_ZMMu64_MASKmskw_ZMMu64_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_and_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Logical</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="__m512i" varname="b" etype="UI64"/>
+ <description>Compute the bitwise AND of packed 64-bit integers in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := a[i+63:i] AND b[i+63:i]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPANDQ" form="zmm {z}, zmm, zmm" xed="VPANDQ_ZMMu64_MASKmskw_ZMMu64_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" sequence="TRUE" name="_mm512_set1_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Set</category>
+ <return type="__m512i" varname="dst" etype="UI8"/>
+ <parameter type="char" varname="a" etype="UI8"/>
+ <description>Broadcast 8-bit integer "a" to all elements of "dst".</description>
+ <operation>
+FOR j := 0 to 63
+ i := j*8
+ dst[i+7:i] := a[7:0]
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPBROADCASTB" form="zmm, r8" xed="VPBROADCASTB_ZMMu8_MASKmskw_GPR32u8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_broadcastd_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <description>Broadcast the low packed 32-bit integer from "a" to all elements of "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := a[31:0]
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPBROADCASTD" form="zmm, xmm" xed="VPBROADCASTD_ZMMu32_MASKmskw_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_broadcastd_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="src" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <description>Broadcast the low packed 32-bit integer from "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := a[31:0]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPBROADCASTD" form="zmm {k}, xmm" xed="VPBROADCASTD_ZMMu32_MASKmskw_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_set1_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Set</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="src" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="int" varname="a" etype="UI32"/>
+ <description>Broadcast 32-bit integer "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := a[31:0]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPBROADCASTD" form="zmm {k}, r32" xed="VPBROADCASTD_ZMMu32_MASKmskw_GPR32u32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_broadcastd_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <description>Broadcast the low packed 32-bit integer from "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := a[31:0]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPBROADCASTD" form="zmm {z}, xmm" xed="VPBROADCASTD_ZMMu32_MASKmskw_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_set1_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Set</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="int" varname="a" etype="UI32"/>
+ <description>Broadcast 32-bit integer "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := a[31:0]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPBROADCASTD" form="zmm {z}, r32" xed="VPBROADCASTD_ZMMu32_MASKmskw_GPR32u32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_set1_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Set</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="int" varname="a" etype="UI32"/>
+ <description>Broadcast 32-bit integer "a" to all elements of "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := a[31:0]
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPBROADCASTD" form="zmm, r32" xed="VPBROADCASTD_ZMMu32_MASKmskw_GPR32u32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_broadcastq_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <description>Broadcast the low packed 64-bit integer from "a" to all elements of "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ dst[i+63:i] := a[63:0]
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPBROADCASTQ" form="zmm, xmm" xed="VPBROADCASTQ_ZMMu64_MASKmskw_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_broadcastq_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <description>Broadcast the low packed 64-bit integer from "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := a[63:0]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPBROADCASTQ" form="zmm {k}, xmm" xed="VPBROADCASTQ_ZMMu64_MASKmskw_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_set1_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Set</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__int64" varname="a" etype="UI64"/>
+ <description>Broadcast 64-bit integer "a" to all elements of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := a[63:0]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPBROADCASTQ" form="zmm {k}, r64" xed="VPBROADCASTQ_ZMMu64_MASKmskw_GPR64u64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_broadcastq_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <description>Broadcast the low packed 64-bit integer from "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := a[63:0]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPBROADCASTQ" form="zmm {z}, xmm" xed="VPBROADCASTQ_ZMMu64_MASKmskw_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_set1_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Set</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__int64" varname="a" etype="UI64"/>
+ <description>Broadcast 64-bit integer "a" to all elements of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := a[63:0]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPBROADCASTQ" form="zmm {z}, r64" xed="VPBROADCASTQ_ZMMu64_MASKmskw_GPR64u64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_set1_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Set</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__int64" varname="a" etype="UI64"/>
+ <description>Broadcast 64-bit integer "a" to all elements of "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ dst[i+63:i] := a[63:0]
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPBROADCASTQ" form="zmm, r64" xed="VPBROADCASTQ_ZMMu64_MASKmskw_GPR64u64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" sequence="TRUE" name="_mm512_set1_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Set</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="short" varname="a" etype="UI16"/>
+  <description>Broadcast the low packed 16-bit integer from "a" to all elements of "dst".</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ dst[i+15:i] := a[15:0]
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPBROADCASTW" form="zmm, r16" xed="VPBROADCASTW_ZMMu16_MASKmskw_GPR32u16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_cmplt_epi32_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI32"/>
+ <parameter type="__m512i" varname="b" etype="SI32"/>
+ <description>Compare packed signed 32-bit integers in "a" and "b" for less-than, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ k[j] := ( a[i+31:i] &lt; b[i+31:i] ) ? 1 : 0
+ENDFOR
+k[MAX:16] := 0
+ </operation>
+ <instruction name="VPCMPD" form="k, zmm, zmm, imm8" xed="VPCMPD_MASKmskw_MASKmskw_ZMMi32_ZMMi32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cmplt_epi32_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__mmask16" varname="k1" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI32"/>
+ <parameter type="__m512i" varname="b" etype="SI32"/>
+  <description>Compare packed signed 32-bit integers in "a" and "b" for less-than, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k1[j]
+ k[j] := ( a[i+31:i] &lt; b[i+31:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:16] := 0
+ </operation>
+ <instruction name="VPCMPD" form="k {k}, zmm, zmm, imm8" xed="VPCMPD_MASKmskw_MASKmskw_ZMMi32_ZMMi32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_cmp_epi64_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI64"/>
+ <parameter type="__m512i" varname="b" etype="SI64"/>
+ <parameter type="_MM_CMPINT_ENUM" varname="imm8" etype="IMM" immtype="_MM_CMPINT"/>
+ <description>Compare packed signed 64-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k".</description>
+ <operation>CASE (imm8[2:0]) OF
+0: OP := _MM_CMPINT_EQ
+1: OP := _MM_CMPINT_LT
+2: OP := _MM_CMPINT_LE
+3: OP := _MM_CMPINT_FALSE
+4: OP := _MM_CMPINT_NE
+5: OP := _MM_CMPINT_NLT
+6: OP := _MM_CMPINT_NLE
+7: OP := _MM_CMPINT_TRUE
+ESAC
+FOR j := 0 to 7
+ i := j*64
+ k[j] := ( a[i+63:i] OP b[i+63:i] ) ? 1 : 0
+ENDFOR
+k[MAX:8] := 0
+ </operation>
+ <instruction name="VPCMPQ" form="k, zmm, zmm, imm8" xed="VPCMPQ_MASKmskw_MASKmskw_ZMMi64_ZMMi64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_cmpeq_epi64_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="__m512i" varname="b" etype="UI64"/>
+ <description>Compare packed 64-bit integers in "a" and "b" for equality, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ k[j] := ( a[i+63:i] == b[i+63:i] ) ? 1 : 0
+ENDFOR
+k[MAX:8] := 0
+ </operation>
+ <instruction name="VPCMPEQQ" form="k, zmm, zmm" xed="VPCMPEQQ_MASKmskw_MASKmskw_ZMMu64_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_cmpge_epi64_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI64"/>
+ <parameter type="__m512i" varname="b" etype="SI64"/>
+ <description>Compare packed signed 64-bit integers in "a" and "b" for greater-than-or-equal, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ k[j] := ( a[i+63:i] &gt;= b[i+63:i] ) ? 1 : 0
+ENDFOR
+k[MAX:8] := 0
+ </operation>
+ <instruction name="VPCMPQ" form="k, zmm, zmm, imm8" xed="VPCMPQ_MASKmskw_MASKmskw_ZMMi64_ZMMi64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_cmpgt_epi64_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI64"/>
+ <parameter type="__m512i" varname="b" etype="SI64"/>
+ <description>Compare packed signed 64-bit integers in "a" and "b" for greater-than, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ k[j] := ( a[i+63:i] &gt; b[i+63:i] ) ? 1 : 0
+ENDFOR
+k[MAX:8] := 0
+ </operation>
+ <instruction name="VPCMPGTQ" form="k, zmm, zmm" xed="VPCMPGTQ_MASKmskw_MASKmskw_ZMMi64_ZMMi64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_cmple_epi64_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI64"/>
+ <parameter type="__m512i" varname="b" etype="SI64"/>
+ <description>Compare packed signed 64-bit integers in "a" and "b" for less-than-or-equal, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ k[j] := ( a[i+63:i] &lt;= b[i+63:i] ) ? 1 : 0
+ENDFOR
+k[MAX:8] := 0
+ </operation>
+ <instruction name="VPCMPQ" form="k, zmm, zmm, imm8" xed="VPCMPQ_MASKmskw_MASKmskw_ZMMi64_ZMMi64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_cmplt_epi64_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI64"/>
+ <parameter type="__m512i" varname="b" etype="SI64"/>
+ <description>Compare packed signed 64-bit integers in "a" and "b" for less-than, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ k[j] := ( a[i+63:i] &lt; b[i+63:i] ) ? 1 : 0
+ENDFOR
+k[MAX:8] := 0
+ </operation>
+ <instruction name="VPCMPQ" form="k, zmm, zmm, imm8" xed="VPCMPQ_MASKmskw_MASKmskw_ZMMi64_ZMMi64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_cmpneq_epi64_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI64"/>
+ <parameter type="__m512i" varname="b" etype="SI64"/>
+ <description>Compare packed signed 64-bit integers in "a" and "b" for not-equal, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ k[j] := ( a[i+63:i] != b[i+63:i] ) ? 1 : 0
+ENDFOR
+k[MAX:8] := 0
+ </operation>
+ <instruction name="VPCMPQ" form="k, zmm, zmm, imm8" xed="VPCMPQ_MASKmskw_MASKmskw_ZMMi64_ZMMi64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cmp_epi64_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__mmask8" varname="k1" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI64"/>
+ <parameter type="__m512i" varname="b" etype="SI64"/>
+ <parameter type="_MM_CMPINT_ENUM" varname="imm8" etype="IMM" immtype="_MM_CMPINT"/>
+ <description>Compare packed signed 64-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>CASE (imm8[2:0]) OF
+0: OP := _MM_CMPINT_EQ
+1: OP := _MM_CMPINT_LT
+2: OP := _MM_CMPINT_LE
+3: OP := _MM_CMPINT_FALSE
+4: OP := _MM_CMPINT_NE
+5: OP := _MM_CMPINT_NLT
+6: OP := _MM_CMPINT_NLE
+7: OP := _MM_CMPINT_TRUE
+ESAC
+FOR j := 0 to 7
+ i := j*64
+ IF k1[j]
+ k[j] := ( a[i+63:i] OP b[i+63:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:8] := 0
+ </operation>
+ <instruction name="VPCMPQ" form="k {k}, zmm, zmm, imm8" xed="VPCMPQ_MASKmskw_MASKmskw_ZMMi64_ZMMi64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cmpeq_epi64_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__mmask8" varname="k1" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="__m512i" varname="b" etype="UI64"/>
+ <description>Compare packed 64-bit integers in "a" and "b" for equality, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k1[j]
+ k[j] := ( a[i+63:i] == b[i+63:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:8] := 0
+ </operation>
+ <instruction name="VPCMPEQQ" form="k {k}, zmm, zmm" xed="VPCMPEQQ_MASKmskw_MASKmskw_ZMMu64_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cmpge_epi64_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__mmask8" varname="k1" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI64"/>
+ <parameter type="__m512i" varname="b" etype="SI64"/>
+ <description>Compare packed signed 64-bit integers in "a" and "b" for greater-than-or-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k1[j]
+ k[j] := ( a[i+63:i] &gt;= b[i+63:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:8] := 0
+ </operation>
+ <instruction name="VPCMPQ" form="k {k}, zmm, zmm, imm8" xed="VPCMPQ_MASKmskw_MASKmskw_ZMMi64_ZMMi64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cmpgt_epi64_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__mmask8" varname="k1" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI64"/>
+ <parameter type="__m512i" varname="b" etype="SI64"/>
+ <description>Compare packed signed 64-bit integers in "a" and "b" for greater-than, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k1[j]
+ k[j] := ( a[i+63:i] &gt; b[i+63:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:8] := 0
+ </operation>
+ <instruction name="VPCMPGTQ" form="k {k}, zmm, zmm" xed="VPCMPGTQ_MASKmskw_MASKmskw_ZMMi64_ZMMi64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cmple_epi64_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__mmask8" varname="k1" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI64"/>
+ <parameter type="__m512i" varname="b" etype="SI64"/>
+ <description>Compare packed signed 64-bit integers in "a" and "b" for less-than-or-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k1[j]
+ k[j] := ( a[i+63:i] &lt;= b[i+63:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:8] := 0
+ </operation>
+ <instruction name="VPCMPQ" form="k {k}, zmm, zmm, imm8" xed="VPCMPQ_MASKmskw_MASKmskw_ZMMi64_ZMMi64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cmplt_epi64_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__mmask8" varname="k1" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI64"/>
+ <parameter type="__m512i" varname="b" etype="SI64"/>
+ <description>Compare packed signed 64-bit integers in "a" and "b" for less-than, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k1[j]
+ k[j] := ( a[i+63:i] &lt; b[i+63:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:8] := 0
+ </operation>
+ <instruction name="VPCMPQ" form="k {k}, zmm, zmm, imm8" xed="VPCMPQ_MASKmskw_MASKmskw_ZMMi64_ZMMi64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cmpneq_epi64_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__mmask8" varname="k1" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI64"/>
+ <parameter type="__m512i" varname="b" etype="SI64"/>
+ <description>Compare packed signed 64-bit integers in "a" and "b" for not-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k1[j]
+ k[j] := ( a[i+63:i] != b[i+63:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:8] := 0
+ </operation>
+ <instruction name="VPCMPQ" form="k {k}, zmm, zmm, imm8" xed="VPCMPQ_MASKmskw_MASKmskw_ZMMi64_ZMMi64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_cmp_epu64_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="__m512i" varname="b" etype="UI64"/>
+ <parameter type="_MM_CMPINT_ENUM" varname="imm8" etype="IMM" immtype="_MM_CMPINT"/>
+ <description>Compare packed unsigned 64-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k".</description>
+ <operation>CASE (imm8[2:0]) OF
+0: OP := _MM_CMPINT_EQ
+1: OP := _MM_CMPINT_LT
+2: OP := _MM_CMPINT_LE
+3: OP := _MM_CMPINT_FALSE
+4: OP := _MM_CMPINT_NE
+5: OP := _MM_CMPINT_NLT
+6: OP := _MM_CMPINT_NLE
+7: OP := _MM_CMPINT_TRUE
+ESAC
+FOR j := 0 to 7
+ i := j*64
+ k[j] := ( a[i+63:i] OP b[i+63:i] ) ? 1 : 0
+ENDFOR
+k[MAX:8] := 0
+ </operation>
+ <instruction name="VPCMPUQ" form="k, zmm, zmm, imm8" xed="VPCMPUQ_MASKmskw_MASKmskw_ZMMu64_ZMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_cmpeq_epu64_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="__m512i" varname="b" etype="UI64"/>
+ <description>Compare packed unsigned 64-bit integers in "a" and "b" for equality, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ k[j] := ( a[i+63:i] == b[i+63:i] ) ? 1 : 0
+ENDFOR
+k[MAX:8] := 0
+ </operation>
+ <instruction name="VPCMPUQ" form="k, zmm, zmm, imm8" xed="VPCMPUQ_MASKmskw_MASKmskw_ZMMu64_ZMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_cmpge_epu64_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="__m512i" varname="b" etype="UI64"/>
+ <description>Compare packed unsigned 64-bit integers in "a" and "b" for greater-than-or-equal, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ k[j] := ( a[i+63:i] &gt;= b[i+63:i] ) ? 1 : 0
+ENDFOR
+k[MAX:8] := 0
+ </operation>
+ <instruction name="VPCMPUQ" form="k, zmm, zmm, imm8" xed="VPCMPUQ_MASKmskw_MASKmskw_ZMMu64_ZMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_cmpgt_epu64_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="__m512i" varname="b" etype="UI64"/>
+ <description>Compare packed unsigned 64-bit integers in "a" and "b" for greater-than, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ k[j] := ( a[i+63:i] &gt; b[i+63:i] ) ? 1 : 0
+ENDFOR
+k[MAX:8] := 0
+ </operation>
+ <instruction name="VPCMPUQ" form="k, zmm, zmm, imm8" xed="VPCMPUQ_MASKmskw_MASKmskw_ZMMu64_ZMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_cmple_epu64_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="__m512i" varname="b" etype="UI64"/>
+ <description>Compare packed unsigned 64-bit integers in "a" and "b" for less-than-or-equal, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ k[j] := ( a[i+63:i] &lt;= b[i+63:i] ) ? 1 : 0
+ENDFOR
+k[MAX:8] := 0
+ </operation>
+ <instruction name="VPCMPUQ" form="k, zmm, zmm, imm8" xed="VPCMPUQ_MASKmskw_MASKmskw_ZMMu64_ZMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_cmplt_epu64_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="__m512i" varname="b" etype="UI64"/>
+ <description>Compare packed unsigned 64-bit integers in "a" and "b" for less-than, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ k[j] := ( a[i+63:i] &lt; b[i+63:i] ) ? 1 : 0
+ENDFOR
+k[MAX:8] := 0
+ </operation>
+ <instruction name="VPCMPUQ" form="k, zmm, zmm, imm8" xed="VPCMPUQ_MASKmskw_MASKmskw_ZMMu64_ZMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_cmpneq_epu64_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="__m512i" varname="b" etype="UI64"/>
+ <description>Compare packed unsigned 64-bit integers in "a" and "b" for not-equal, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ k[j] := ( a[i+63:i] != b[i+63:i] ) ? 1 : 0
+ENDFOR
+k[MAX:8] := 0
+ </operation>
+ <instruction name="VPCMPUQ" form="k, zmm, zmm, imm8" xed="VPCMPUQ_MASKmskw_MASKmskw_ZMMu64_ZMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cmp_epu64_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__mmask8" varname="k1" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="__m512i" varname="b" etype="UI64"/>
+ <parameter type="_MM_CMPINT_ENUM" varname="imm8" etype="IMM" immtype="_MM_CMPINT"/>
+ <description>Compare packed unsigned 64-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>CASE (imm8[2:0]) OF
+0: OP := _MM_CMPINT_EQ
+1: OP := _MM_CMPINT_LT
+2: OP := _MM_CMPINT_LE
+3: OP := _MM_CMPINT_FALSE
+4: OP := _MM_CMPINT_NE
+5: OP := _MM_CMPINT_NLT
+6: OP := _MM_CMPINT_NLE
+7: OP := _MM_CMPINT_TRUE
+ESAC
+FOR j := 0 to 7
+ i := j*64
+ IF k1[j]
+ k[j] := ( a[i+63:i] OP b[i+63:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:8] := 0
+ </operation>
+ <instruction name="VPCMPUQ" form="k {k}, zmm, zmm, imm8" xed="VPCMPUQ_MASKmskw_MASKmskw_ZMMu64_ZMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cmpeq_epu64_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__mmask8" varname="k1" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="__m512i" varname="b" etype="UI64"/>
+ <description>Compare packed unsigned 64-bit integers in "a" and "b" for equality, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k1[j]
+ k[j] := ( a[i+63:i] == b[i+63:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:8] := 0
+ </operation>
+ <instruction name="VPCMPUQ" form="k {k}, zmm, zmm, imm8" xed="VPCMPUQ_MASKmskw_MASKmskw_ZMMu64_ZMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cmpge_epu64_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__mmask8" varname="k1" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="__m512i" varname="b" etype="UI64"/>
+ <description>Compare packed unsigned 64-bit integers in "a" and "b" for greater-than-or-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k1[j]
+ k[j] := ( a[i+63:i] &gt;= b[i+63:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:8] := 0
+ </operation>
+ <instruction name="VPCMPUQ" form="k {k}, zmm, zmm, imm8" xed="VPCMPUQ_MASKmskw_MASKmskw_ZMMu64_ZMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cmpgt_epu64_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__mmask8" varname="k1" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="__m512i" varname="b" etype="UI64"/>
+ <description>Compare packed unsigned 64-bit integers in "a" and "b" for greater-than, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k1[j]
+ k[j] := ( a[i+63:i] &gt; b[i+63:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:8] := 0
+ </operation>
+ <instruction name="VPCMPUQ" form="k {k}, zmm, zmm, imm8" xed="VPCMPUQ_MASKmskw_MASKmskw_ZMMu64_ZMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cmple_epu64_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__mmask8" varname="k1" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="__m512i" varname="b" etype="UI64"/>
+ <description>Compare packed unsigned 64-bit integers in "a" and "b" for less-than-or-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k1[j]
+ k[j] := ( a[i+63:i] &lt;= b[i+63:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:8] := 0
+ </operation>
+ <instruction name="VPCMPUQ" form="k {k}, zmm, zmm, imm8" xed="VPCMPUQ_MASKmskw_MASKmskw_ZMMu64_ZMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cmplt_epu64_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__mmask8" varname="k1" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="__m512i" varname="b" etype="UI64"/>
+ <description>Compare packed unsigned 64-bit integers in "a" and "b" for less-than, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k1[j]
+ k[j] := ( a[i+63:i] &lt; b[i+63:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:8] := 0
+ </operation>
+ <instruction name="VPCMPUQ" form="k {k}, zmm, zmm, imm8" xed="VPCMPUQ_MASKmskw_MASKmskw_ZMMu64_ZMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cmpneq_epu64_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__mmask8" varname="k1" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="__m512i" varname="b" etype="UI64"/>
+ <description>Compare packed unsigned 64-bit integers in "a" and "b" for not-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k1[j]
+ k[j] := ( a[i+63:i] != b[i+63:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:8] := 0
+ </operation>
+ <instruction name="VPCMPUQ" form="k {k}, zmm, zmm, imm8" xed="VPCMPUQ_MASKmskw_MASKmskw_ZMMu64_ZMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_compress_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="src" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <description>Contiguously store the active 32-bit integers in "a" (those with their respective bit set in writemask "k") to "dst", and pass through the remaining elements from "src".</description>
+ <operation>
+size := 32
+m := 0
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[m+size-1:m] := a[i+31:i]
+ m := m + size
+ FI
+ENDFOR
+dst[511:m] := src[511:m]
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPCOMPRESSD" form="zmm {k}, zmm" xed="VPCOMPRESSD_ZMMu32_MASKmskw_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_compressstoreu_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Store</category>
+ <category>Swizzle</category>
+ <return type="void"/>
+ <parameter type="void*" varname="base_addr" etype="UI32" memwidth="512"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <description>Contiguously store the active 32-bit integers in "a" (those with their respective bit set in writemask "k") to unaligned memory at "base_addr".</description>
+ <operation>
+size := 32
+m := base_addr
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ MEM[m+size-1:m] := a[i+31:i]
+ m := m + size
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VPCOMPRESSD" form="m32 {k}, zmm" xed="VPCOMPRESSD_MEMu32_MASKmskw_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_compress_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <description>Contiguously store the active 32-bit integers in "a" (those with their respective bit set in zeromask "k") to "dst", and set the remaining elements to zero.</description>
+ <operation>
+size := 32
+m := 0
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[m+size-1:m] := a[i+31:i]
+ m := m + size
+ FI
+ENDFOR
+dst[511:m] := 0
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPCOMPRESSD" form="zmm {z}, zmm" xed="VPCOMPRESSD_ZMMu32_MASKmskw_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_compress_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <description>Contiguously store the active 64-bit integers in "a" (those with their respective bit set in writemask "k") to "dst", and pass through the remaining elements from "src".</description>
+ <operation>
+size := 64
+m := 0
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[m+size-1:m] := a[i+63:i]
+ m := m + size
+ FI
+ENDFOR
+dst[511:m] := src[511:m]
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPCOMPRESSQ" form="zmm {k}, zmm" xed="VPCOMPRESSQ_ZMMu64_MASKmskw_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_compressstoreu_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Store</category>
+ <category>Swizzle</category>
+ <return type="void"/>
+ <parameter type="void*" varname="base_addr" etype="UI64" memwidth="512"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <description>Contiguously store the active 64-bit integers in "a" (those with their respective bit set in writemask "k") to unaligned memory at "base_addr".</description>
+ <operation>
+size := 64
+m := base_addr
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ MEM[m+size-1:m] := a[i+63:i]
+ m := m + size
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VPCOMPRESSQ" form="m64 {k}, zmm" xed="VPCOMPRESSQ_MEMu64_MASKmskw_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_compress_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <description>Contiguously store the active 64-bit integers in "a" (those with their respective bit set in zeromask "k") to "dst", and set the remaining elements to zero.</description>
+ <operation>
+size := 64
+m := 0
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[m+size-1:m] := a[i+63:i]
+ m := m + size
+ FI
+ENDFOR
+dst[511:m] := 0
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPCOMPRESSQ" form="zmm {z}, zmm" xed="VPCOMPRESSQ_ZMMu64_MASKmskw_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_permutexvar_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="src" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="idx" etype="UI32"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <description>Shuffle 32-bit integers in "a" across lanes using the corresponding index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ id := idx[i+3:i]*32
+ IF k[j]
+ dst[i+31:i] := a[id+31:id]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPERMD" form="zmm {k}, zmm, zmm" xed="VPERMD_ZMMu32_MASKmskw_ZMMu32_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_permutexvar_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="idx" etype="UI32"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <description>Shuffle 32-bit integers in "a" across lanes using the corresponding index in "idx", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ id := idx[i+3:i]*32
+ IF k[j]
+ dst[i+31:i] := a[id+31:id]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPERMD" form="zmm {z}, zmm, zmm" xed="VPERMD_ZMMu32_MASKmskw_ZMMu32_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_permutexvar_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="idx" etype="UI32"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <description>Shuffle 32-bit integers in "a" across lanes using the corresponding index in "idx", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ id := idx[i+3:i]*32
+ dst[i+31:i] := a[id+31:id]
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPERMD" form="zmm, zmm, zmm" xed="VPERMD_ZMMu32_MASKmskw_ZMMu32_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask2_permutex2var_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="__m512i" varname="idx" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="b" etype="UI32"/>
+ <description>Shuffle 32-bit integers in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "idx" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ off := idx[i+3:i]*32
+ IF k[j]
+ dst[i+31:i] := idx[i+4] ? b[off+31:off] : a[off+31:off]
+ ELSE
+ dst[i+31:i] := idx[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPERMI2D" form="zmm {k}, zmm, zmm" xed="VPERMI2D_ZMMu32_MASKmskw_ZMMu32_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_permutex2var_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="idx" etype="UI32"/>
+ <parameter type="__m512i" varname="b" etype="UI32"/>
+ <description>Shuffle 32-bit integers in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ off := idx[i+3:i]*32
+ IF k[j]
+ dst[i+31:i] := idx[i+4] ? b[off+31:off] : a[off+31:off]
+ ELSE
+ dst[i+31:i] := a[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPERMT2D" form="zmm {k}, zmm, zmm" xed="VPERMT2D_ZMMu32_MASKmskw_ZMMu32_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_permutex2var_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="__m512i" varname="idx" etype="UI32"/>
+ <parameter type="__m512i" varname="b" etype="UI32"/>
+ <description>Shuffle 32-bit integers in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ off := idx[i+3:i]*32
+ IF k[j]
+ dst[i+31:i] := (idx[i+4]) ? b[off+31:off] : a[off+31:off]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPERMI2D" form="zmm {z}, zmm, zmm" xed="VPERMI2D_ZMMu32_MASKmskw_ZMMu32_ZMMu32_AVX512"/>
+ <instruction name="VPERMT2D" form="zmm {z}, zmm, zmm" xed="VPERMT2D_ZMMu32_MASKmskw_ZMMu32_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_permutex2var_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="__m512i" varname="idx" etype="UI32"/>
+ <parameter type="__m512i" varname="b" etype="UI32"/>
+ <description>Shuffle 32-bit integers in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ off := idx[i+3:i]*32
+ dst[i+31:i] := idx[i+4] ? b[off+31:off] : a[off+31:off]
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPERMI2D" form="zmm, zmm, zmm" xed="VPERMI2D_ZMMu32_MASKmskw_ZMMu32_ZMMu32_AVX512"/>
+ <instruction name="VPERMT2D" form="zmm, zmm, zmm" xed="VPERMT2D_ZMMu32_MASKmskw_ZMMu32_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask2_permutex2var_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512i" varname="idx" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <description>Shuffle double-precision (64-bit) floating-point elements in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "idx" when the corresponding mask bit is not set)</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ off := idx[i+2:i]*64
+ IF k[j]
+ dst[i+63:i] := idx[i+3] ? b[off+63:off] : a[off+63:off]
+ ELSE
+ dst[i+63:i] := idx[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPERMI2PD" form="zmm {k}, zmm, zmm" xed="VPERMI2PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_permutex2var_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="idx" etype="UI64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <description>Shuffle double-precision (64-bit) floating-point elements in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ off := idx[i+2:i]*64
+ IF k[j]
+ dst[i+63:i] := idx[i+3] ? b[off+63:off] : a[off+63:off]
+ ELSE
+ dst[i+63:i] := a[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPERMT2PD" form="zmm {k}, zmm, zmm" xed="VPERMT2PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_permutex2var_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512i" varname="idx" etype="UI64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <description>Shuffle double-precision (64-bit) floating-point elements in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ off := idx[i+2:i]*64
+ IF k[j]
+ dst[i+63:i] := (idx[i+3]) ? b[off+63:off] : a[off+63:off]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPERMI2PD" form="zmm {z}, zmm, zmm" xed="VPERMI2PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <instruction name="VPERMT2PD" form="zmm {z}, zmm, zmm" xed="VPERMT2PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_permutex2var_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512i" varname="idx" etype="UI64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <description>Shuffle double-precision (64-bit) floating-point elements in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ off := idx[i+2:i]*64
+ dst[i+63:i] := idx[i+3] ? b[off+63:off] : a[off+63:off]
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPERMI2PD" form="zmm, zmm, zmm" xed="VPERMI2PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <instruction name="VPERMT2PD" form="zmm, zmm, zmm" xed="VPERMT2PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask2_permutex2var_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512i" varname="idx" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <description>Shuffle single-precision (32-bit) floating-point elements in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "idx" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ off := idx[i+3:i]*32
+ IF k[j]
+ dst[i+31:i] := idx[i+4] ? b[off+31:off] : a[off+31:off]
+ ELSE
+ dst[i+31:i] := idx[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPERMI2PS" form="zmm {k}, zmm, zmm" xed="VPERMI2PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_permutex2var_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="idx" etype="UI32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <description>Shuffle single-precision (32-bit) floating-point elements in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ off := idx[i+3:i]*32
+ IF k[j]
+ dst[i+31:i] := idx[i+4] ? b[off+31:off] : a[off+31:off]
+ ELSE
+ dst[i+31:i] := a[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPERMT2PS" form="zmm {k}, zmm, zmm" xed="VPERMT2PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_permutex2var_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512i" varname="idx" etype="UI32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <description>Shuffle single-precision (32-bit) floating-point elements in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ off := idx[i+3:i]*32
+ IF k[j]
+ dst[i+31:i] := (idx[i+4]) ? b[off+31:off] : a[off+31:off]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPERMI2PS" form="zmm {z}, zmm, zmm" xed="VPERMI2PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <instruction name="VPERMT2PS" form="zmm {z}, zmm, zmm" xed="VPERMT2PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_permutex2var_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512i" varname="idx" etype="UI32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <description>Shuffle single-precision (32-bit) floating-point elements in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ off := idx[i+3:i]*32
+ dst[i+31:i] := idx[i+4] ? b[off+31:off] : a[off+31:off]
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPERMI2PS" form="zmm, zmm, zmm" xed="VPERMI2PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <instruction name="VPERMT2PS" form="zmm, zmm, zmm" xed="VPERMT2PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask2_permutex2var_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="__m512i" varname="idx" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="b" etype="UI64"/>
+ <description>Shuffle 64-bit integers in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "idx" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ off := idx[i+2:i]*64
+ IF k[j]
+ dst[i+63:i] := idx[i+3] ? b[off+63:off] : a[off+63:off]
+ ELSE
+ dst[i+63:i] := idx[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPERMI2Q" form="zmm {k}, zmm, zmm" xed="VPERMI2Q_ZMMu64_MASKmskw_ZMMu64_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_permutex2var_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="idx" etype="UI64"/>
+ <parameter type="__m512i" varname="b" etype="UI64"/>
+ <description>Shuffle 64-bit integers in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ off := idx[i+2:i]*64
+ IF k[j]
+ dst[i+63:i] := idx[i+3] ? b[off+63:off] : a[off+63:off]
+ ELSE
+ dst[i+63:i] := a[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPERMT2Q" form="zmm {k}, zmm, zmm" xed="VPERMT2Q_ZMMu64_MASKmskw_ZMMu64_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_permutex2var_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="__m512i" varname="idx" etype="UI64"/>
+ <parameter type="__m512i" varname="b" etype="UI64"/>
+ <description>Shuffle 64-bit integers in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ off := idx[i+2:i]*64
+ IF k[j]
+ dst[i+63:i] := (idx[i+3]) ? b[off+63:off] : a[off+63:off]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPERMI2Q" form="zmm {z}, zmm, zmm" xed="VPERMI2Q_ZMMu64_MASKmskw_ZMMu64_ZMMu64_AVX512"/>
+ <instruction name="VPERMT2Q" form="zmm {z}, zmm, zmm" xed="VPERMT2Q_ZMMu64_MASKmskw_ZMMu64_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_permutex2var_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="__m512i" varname="idx" etype="UI64"/>
+ <parameter type="__m512i" varname="b" etype="UI64"/>
+ <description>Shuffle 64-bit integers in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ off := idx[i+2:i]*64
+ dst[i+63:i] := idx[i+3] ? b[off+63:off] : a[off+63:off]
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPERMI2Q" form="zmm, zmm, zmm" xed="VPERMI2Q_ZMMu64_MASKmskw_ZMMu64_ZMMu64_AVX512"/>
+ <instruction name="VPERMT2Q" form="zmm, zmm, zmm" xed="VPERMT2Q_ZMMu64_MASKmskw_ZMMu64_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_permute_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shuffle double-precision (64-bit) floating-point elements in "a" within 128-bit lanes using the control in "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+IF (imm8[0] == 0) tmp_dst[63:0] := a[63:0]; FI
+IF (imm8[0] == 1) tmp_dst[63:0] := a[127:64]; FI
+IF (imm8[1] == 0) tmp_dst[127:64] := a[63:0]; FI
+IF (imm8[1] == 1) tmp_dst[127:64] := a[127:64]; FI
+IF (imm8[2] == 0) tmp_dst[191:128] := a[191:128]; FI
+IF (imm8[2] == 1) tmp_dst[191:128] := a[255:192]; FI
+IF (imm8[3] == 0) tmp_dst[255:192] := a[191:128]; FI
+IF (imm8[3] == 1) tmp_dst[255:192] := a[255:192]; FI
+IF (imm8[4] == 0) tmp_dst[319:256] := a[319:256]; FI
+IF (imm8[4] == 1) tmp_dst[319:256] := a[383:320]; FI
+IF (imm8[5] == 0) tmp_dst[383:320] := a[319:256]; FI
+IF (imm8[5] == 1) tmp_dst[383:320] := a[383:320]; FI
+IF (imm8[6] == 0) tmp_dst[447:384] := a[447:384]; FI
+IF (imm8[6] == 1) tmp_dst[447:384] := a[511:448]; FI
+IF (imm8[7] == 0) tmp_dst[511:448] := a[447:384]; FI
+IF (imm8[7] == 1) tmp_dst[511:448] := a[511:448]; FI
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := tmp_dst[i+63:i]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPERMILPD" form="zmm {k}, zmm, imm8" xed="VPERMILPD_ZMMf64_MASKmskw_ZMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_permutevar_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512i" varname="b" etype="UI64"/>
+ <description>Shuffle double-precision (64-bit) floating-point elements in "a" within 128-bit lanes using the control in "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+IF (b[1] == 0) tmp_dst[63:0] := a[63:0]; FI
+IF (b[1] == 1) tmp_dst[63:0] := a[127:64]; FI
+IF (b[65] == 0) tmp_dst[127:64] := a[63:0]; FI
+IF (b[65] == 1) tmp_dst[127:64] := a[127:64]; FI
+IF (b[129] == 0) tmp_dst[191:128] := a[191:128]; FI
+IF (b[129] == 1) tmp_dst[191:128] := a[255:192]; FI
+IF (b[193] == 0) tmp_dst[255:192] := a[191:128]; FI
+IF (b[193] == 1) tmp_dst[255:192] := a[255:192]; FI
+IF (b[257] == 0) tmp_dst[319:256] := a[319:256]; FI
+IF (b[257] == 1) tmp_dst[319:256] := a[383:320]; FI
+IF (b[321] == 0) tmp_dst[383:320] := a[319:256]; FI
+IF (b[321] == 1) tmp_dst[383:320] := a[383:320]; FI
+IF (b[385] == 0) tmp_dst[447:384] := a[447:384]; FI
+IF (b[385] == 1) tmp_dst[447:384] := a[511:448]; FI
+IF (b[449] == 0) tmp_dst[511:448] := a[447:384]; FI
+IF (b[449] == 1) tmp_dst[511:448] := a[511:448]; FI
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := tmp_dst[i+63:i]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPERMILPD" form="zmm {k}, zmm, zmm" xed="VPERMILPD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_permute_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shuffle double-precision (64-bit) floating-point elements in "a" within 128-bit lanes using the control in "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+IF (imm8[0] == 0) tmp_dst[63:0] := a[63:0]; FI
+IF (imm8[0] == 1) tmp_dst[63:0] := a[127:64]; FI
+IF (imm8[1] == 0) tmp_dst[127:64] := a[63:0]; FI
+IF (imm8[1] == 1) tmp_dst[127:64] := a[127:64]; FI
+IF (imm8[2] == 0) tmp_dst[191:128] := a[191:128]; FI
+IF (imm8[2] == 1) tmp_dst[191:128] := a[255:192]; FI
+IF (imm8[3] == 0) tmp_dst[255:192] := a[191:128]; FI
+IF (imm8[3] == 1) tmp_dst[255:192] := a[255:192]; FI
+IF (imm8[4] == 0) tmp_dst[319:256] := a[319:256]; FI
+IF (imm8[4] == 1) tmp_dst[319:256] := a[383:320]; FI
+IF (imm8[5] == 0) tmp_dst[383:320] := a[319:256]; FI
+IF (imm8[5] == 1) tmp_dst[383:320] := a[383:320]; FI
+IF (imm8[6] == 0) tmp_dst[447:384] := a[447:384]; FI
+IF (imm8[6] == 1) tmp_dst[447:384] := a[511:448]; FI
+IF (imm8[7] == 0) tmp_dst[511:448] := a[447:384]; FI
+IF (imm8[7] == 1) tmp_dst[511:448] := a[511:448]; FI
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := tmp_dst[i+63:i]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPERMILPD" form="zmm {z}, zmm, imm8" xed="VPERMILPD_ZMMf64_MASKmskw_ZMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_permutevar_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512i" varname="b" etype="UI64"/>
+ <description>Shuffle double-precision (64-bit) floating-point elements in "a" within 128-bit lanes using the control in "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+IF (b[1] == 0) tmp_dst[63:0] := a[63:0]; FI
+IF (b[1] == 1) tmp_dst[63:0] := a[127:64]; FI
+IF (b[65] == 0) tmp_dst[127:64] := a[63:0]; FI
+IF (b[65] == 1) tmp_dst[127:64] := a[127:64]; FI
+IF (b[129] == 0) tmp_dst[191:128] := a[191:128]; FI
+IF (b[129] == 1) tmp_dst[191:128] := a[255:192]; FI
+IF (b[193] == 0) tmp_dst[255:192] := a[191:128]; FI
+IF (b[193] == 1) tmp_dst[255:192] := a[255:192]; FI
+IF (b[257] == 0) tmp_dst[319:256] := a[319:256]; FI
+IF (b[257] == 1) tmp_dst[319:256] := a[383:320]; FI
+IF (b[321] == 0) tmp_dst[383:320] := a[319:256]; FI
+IF (b[321] == 1) tmp_dst[383:320] := a[383:320]; FI
+IF (b[385] == 0) tmp_dst[447:384] := a[447:384]; FI
+IF (b[385] == 1) tmp_dst[447:384] := a[511:448]; FI
+IF (b[449] == 0) tmp_dst[511:448] := a[447:384]; FI
+IF (b[449] == 1) tmp_dst[511:448] := a[511:448]; FI
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := tmp_dst[i+63:i]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPERMILPD" form="zmm {z}, zmm, zmm" xed="VPERMILPD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_permute_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shuffle double-precision (64-bit) floating-point elements in "a" within 128-bit lanes using the control in "imm8", and store the results in "dst".</description>
+ <operation>
+IF (imm8[0] == 0) dst[63:0] := a[63:0]; FI
+IF (imm8[0] == 1) dst[63:0] := a[127:64]; FI
+IF (imm8[1] == 0) dst[127:64] := a[63:0]; FI
+IF (imm8[1] == 1) dst[127:64] := a[127:64]; FI
+IF (imm8[2] == 0) dst[191:128] := a[191:128]; FI
+IF (imm8[2] == 1) dst[191:128] := a[255:192]; FI
+IF (imm8[3] == 0) dst[255:192] := a[191:128]; FI
+IF (imm8[3] == 1) dst[255:192] := a[255:192]; FI
+IF (imm8[4] == 0) dst[319:256] := a[319:256]; FI
+IF (imm8[4] == 1) dst[319:256] := a[383:320]; FI
+IF (imm8[5] == 0) dst[383:320] := a[319:256]; FI
+IF (imm8[5] == 1) dst[383:320] := a[383:320]; FI
+IF (imm8[6] == 0) dst[447:384] := a[447:384]; FI
+IF (imm8[6] == 1) dst[447:384] := a[511:448]; FI
+IF (imm8[7] == 0) dst[511:448] := a[447:384]; FI
+IF (imm8[7] == 1) dst[511:448] := a[511:448]; FI
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPERMILPD" form="zmm, zmm, imm8" xed="VPERMILPD_ZMMf64_MASKmskw_ZMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_permutevar_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512i" varname="b" etype="UI64"/>
+ <description>Shuffle double-precision (64-bit) floating-point elements in "a" within 128-bit lanes using the control in "b", and store the results in "dst".</description>
+ <operation>
+IF (b[1] == 0) dst[63:0] := a[63:0]; FI
+IF (b[1] == 1) dst[63:0] := a[127:64]; FI
+IF (b[65] == 0) dst[127:64] := a[63:0]; FI
+IF (b[65] == 1) dst[127:64] := a[127:64]; FI
+IF (b[129] == 0) dst[191:128] := a[191:128]; FI
+IF (b[129] == 1) dst[191:128] := a[255:192]; FI
+IF (b[193] == 0) dst[255:192] := a[191:128]; FI
+IF (b[193] == 1) dst[255:192] := a[255:192]; FI
+IF (b[257] == 0) dst[319:256] := a[319:256]; FI
+IF (b[257] == 1) dst[319:256] := a[383:320]; FI
+IF (b[321] == 0) dst[383:320] := a[319:256]; FI
+IF (b[321] == 1) dst[383:320] := a[383:320]; FI
+IF (b[385] == 0) dst[447:384] := a[447:384]; FI
+IF (b[385] == 1) dst[447:384] := a[511:448]; FI
+IF (b[449] == 0) dst[511:448] := a[447:384]; FI
+IF (b[449] == 1) dst[511:448] := a[511:448]; FI
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPERMILPD" form="zmm, zmm, zmm" xed="VPERMILPD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_permute_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shuffle single-precision (32-bit) floating-point elements in "a" within 128-bit lanes using the control in "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE SELECT4(src, control) {
+ CASE(control[1:0]) OF
+ 0: tmp[31:0] := src[31:0]
+ 1: tmp[31:0] := src[63:32]
+ 2: tmp[31:0] := src[95:64]
+ 3: tmp[31:0] := src[127:96]
+ ESAC
+ RETURN tmp[31:0]
+}
+tmp_dst[31:0] := SELECT4(a[127:0], imm8[1:0])
+tmp_dst[63:32] := SELECT4(a[127:0], imm8[3:2])
+tmp_dst[95:64] := SELECT4(a[127:0], imm8[5:4])
+tmp_dst[127:96] := SELECT4(a[127:0], imm8[7:6])
+tmp_dst[159:128] := SELECT4(a[255:128], imm8[1:0])
+tmp_dst[191:160] := SELECT4(a[255:128], imm8[3:2])
+tmp_dst[223:192] := SELECT4(a[255:128], imm8[5:4])
+tmp_dst[255:224] := SELECT4(a[255:128], imm8[7:6])
+tmp_dst[287:256] := SELECT4(a[383:256], imm8[1:0])
+tmp_dst[319:288] := SELECT4(a[383:256], imm8[3:2])
+tmp_dst[351:320] := SELECT4(a[383:256], imm8[5:4])
+tmp_dst[383:352] := SELECT4(a[383:256], imm8[7:6])
+tmp_dst[415:384] := SELECT4(a[511:384], imm8[1:0])
+tmp_dst[447:416] := SELECT4(a[511:384], imm8[3:2])
+tmp_dst[479:448] := SELECT4(a[511:384], imm8[5:4])
+tmp_dst[511:480] := SELECT4(a[511:384], imm8[7:6])
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := tmp_dst[i+31:i]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPERMILPS" form="zmm {k}, zmm, imm8" xed="VPERMILPS_ZMMf32_MASKmskw_ZMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_permutevar_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512i" varname="b" etype="UI32"/>
+ <description>Shuffle single-precision (32-bit) floating-point elements in "a" within 128-bit lanes using the control in "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE SELECT4(src, control) {
+ CASE(control[1:0]) OF
+ 0: tmp[31:0] := src[31:0]
+ 1: tmp[31:0] := src[63:32]
+ 2: tmp[31:0] := src[95:64]
+ 3: tmp[31:0] := src[127:96]
+ ESAC
+ RETURN tmp[31:0]
+}
+tmp_dst[31:0] := SELECT4(a[127:0], b[1:0])
+tmp_dst[63:32] := SELECT4(a[127:0], b[33:32])
+tmp_dst[95:64] := SELECT4(a[127:0], b[65:64])
+tmp_dst[127:96] := SELECT4(a[127:0], b[97:96])
+tmp_dst[159:128] := SELECT4(a[255:128], b[129:128])
+tmp_dst[191:160] := SELECT4(a[255:128], b[161:160])
+tmp_dst[223:192] := SELECT4(a[255:128], b[193:192])
+tmp_dst[255:224] := SELECT4(a[255:128], b[225:224])
+tmp_dst[287:256] := SELECT4(a[383:256], b[257:256])
+tmp_dst[319:288] := SELECT4(a[383:256], b[289:288])
+tmp_dst[351:320] := SELECT4(a[383:256], b[321:320])
+tmp_dst[383:352] := SELECT4(a[383:256], b[353:352])
+tmp_dst[415:384] := SELECT4(a[511:384], b[385:384])
+tmp_dst[447:416] := SELECT4(a[511:384], b[417:416])
+tmp_dst[479:448] := SELECT4(a[511:384], b[449:448])
+tmp_dst[511:480] := SELECT4(a[511:384], b[481:480])
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := tmp_dst[i+31:i]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPERMILPS" form="zmm {k}, zmm, zmm" xed="VPERMILPS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_permute_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shuffle single-precision (32-bit) floating-point elements in "a" within 128-bit lanes using the control in "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE SELECT4(src, control) {
+ CASE(control[1:0]) OF
+ 0: tmp[31:0] := src[31:0]
+ 1: tmp[31:0] := src[63:32]
+ 2: tmp[31:0] := src[95:64]
+ 3: tmp[31:0] := src[127:96]
+ ESAC
+ RETURN tmp[31:0]
+}
+tmp_dst[31:0] := SELECT4(a[127:0], imm8[1:0])
+tmp_dst[63:32] := SELECT4(a[127:0], imm8[3:2])
+tmp_dst[95:64] := SELECT4(a[127:0], imm8[5:4])
+tmp_dst[127:96] := SELECT4(a[127:0], imm8[7:6])
+tmp_dst[159:128] := SELECT4(a[255:128], imm8[1:0])
+tmp_dst[191:160] := SELECT4(a[255:128], imm8[3:2])
+tmp_dst[223:192] := SELECT4(a[255:128], imm8[5:4])
+tmp_dst[255:224] := SELECT4(a[255:128], imm8[7:6])
+tmp_dst[287:256] := SELECT4(a[383:256], imm8[1:0])
+tmp_dst[319:288] := SELECT4(a[383:256], imm8[3:2])
+tmp_dst[351:320] := SELECT4(a[383:256], imm8[5:4])
+tmp_dst[383:352] := SELECT4(a[383:256], imm8[7:6])
+tmp_dst[415:384] := SELECT4(a[511:384], imm8[1:0])
+tmp_dst[447:416] := SELECT4(a[511:384], imm8[3:2])
+tmp_dst[479:448] := SELECT4(a[511:384], imm8[5:4])
+tmp_dst[511:480] := SELECT4(a[511:384], imm8[7:6])
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := tmp_dst[i+31:i]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPERMILPS" form="zmm {z}, zmm, imm8" xed="VPERMILPS_ZMMf32_MASKmskw_ZMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_permutevar_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512i" varname="b" etype="UI32"/>
+ <description>Shuffle single-precision (32-bit) floating-point elements in "a" within 128-bit lanes using the control in "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE SELECT4(src, control) {
+ CASE(control[1:0]) OF
+ 0: tmp[31:0] := src[31:0]
+ 1: tmp[31:0] := src[63:32]
+ 2: tmp[31:0] := src[95:64]
+ 3: tmp[31:0] := src[127:96]
+ ESAC
+ RETURN tmp[31:0]
+}
+tmp_dst[31:0] := SELECT4(a[127:0], b[1:0])
+tmp_dst[63:32] := SELECT4(a[127:0], b[33:32])
+tmp_dst[95:64] := SELECT4(a[127:0], b[65:64])
+tmp_dst[127:96] := SELECT4(a[127:0], b[97:96])
+tmp_dst[159:128] := SELECT4(a[255:128], b[129:128])
+tmp_dst[191:160] := SELECT4(a[255:128], b[161:160])
+tmp_dst[223:192] := SELECT4(a[255:128], b[193:192])
+tmp_dst[255:224] := SELECT4(a[255:128], b[225:224])
+tmp_dst[287:256] := SELECT4(a[383:256], b[257:256])
+tmp_dst[319:288] := SELECT4(a[383:256], b[289:288])
+tmp_dst[351:320] := SELECT4(a[383:256], b[321:320])
+tmp_dst[383:352] := SELECT4(a[383:256], b[353:352])
+tmp_dst[415:384] := SELECT4(a[511:384], b[385:384])
+tmp_dst[447:416] := SELECT4(a[511:384], b[417:416])
+tmp_dst[479:448] := SELECT4(a[511:384], b[449:448])
+tmp_dst[511:480] := SELECT4(a[511:384], b[481:480])
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := tmp_dst[i+31:i]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPERMILPS" form="zmm {z}, zmm, zmm" xed="VPERMILPS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_permute_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shuffle single-precision (32-bit) floating-point elements in "a" within 128-bit lanes using the control in "imm8", and store the results in "dst".</description>
+ <operation>
+DEFINE SELECT4(src, control) {
+ CASE(control[1:0]) OF
+ 0: tmp[31:0] := src[31:0]
+ 1: tmp[31:0] := src[63:32]
+ 2: tmp[31:0] := src[95:64]
+ 3: tmp[31:0] := src[127:96]
+ ESAC
+ RETURN tmp[31:0]
+}
+dst[31:0] := SELECT4(a[127:0], imm8[1:0])
+dst[63:32] := SELECT4(a[127:0], imm8[3:2])
+dst[95:64] := SELECT4(a[127:0], imm8[5:4])
+dst[127:96] := SELECT4(a[127:0], imm8[7:6])
+dst[159:128] := SELECT4(a[255:128], imm8[1:0])
+dst[191:160] := SELECT4(a[255:128], imm8[3:2])
+dst[223:192] := SELECT4(a[255:128], imm8[5:4])
+dst[255:224] := SELECT4(a[255:128], imm8[7:6])
+dst[287:256] := SELECT4(a[383:256], imm8[1:0])
+dst[319:288] := SELECT4(a[383:256], imm8[3:2])
+dst[351:320] := SELECT4(a[383:256], imm8[5:4])
+dst[383:352] := SELECT4(a[383:256], imm8[7:6])
+dst[415:384] := SELECT4(a[511:384], imm8[1:0])
+dst[447:416] := SELECT4(a[511:384], imm8[3:2])
+dst[479:448] := SELECT4(a[511:384], imm8[5:4])
+dst[511:480] := SELECT4(a[511:384], imm8[7:6])
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPERMILPS" form="zmm, zmm, imm8" xed="VPERMILPS_ZMMf32_MASKmskw_ZMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_permutevar_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512i" varname="b" etype="UI32"/>
+ <description>Shuffle single-precision (32-bit) floating-point elements in "a" within 128-bit lanes using the control in "b", and store the results in "dst".</description>
+ <operation>
+DEFINE SELECT4(src, control) {
+ CASE(control[1:0]) OF
+ 0: tmp[31:0] := src[31:0]
+ 1: tmp[31:0] := src[63:32]
+ 2: tmp[31:0] := src[95:64]
+ 3: tmp[31:0] := src[127:96]
+ ESAC
+ RETURN tmp[31:0]
+}
+dst[31:0] := SELECT4(a[127:0], b[1:0])
+dst[63:32] := SELECT4(a[127:0], b[33:32])
+dst[95:64] := SELECT4(a[127:0], b[65:64])
+dst[127:96] := SELECT4(a[127:0], b[97:96])
+dst[159:128] := SELECT4(a[255:128], b[129:128])
+dst[191:160] := SELECT4(a[255:128], b[161:160])
+dst[223:192] := SELECT4(a[255:128], b[193:192])
+dst[255:224] := SELECT4(a[255:128], b[225:224])
+dst[287:256] := SELECT4(a[383:256], b[257:256])
+dst[319:288] := SELECT4(a[383:256], b[289:288])
+dst[351:320] := SELECT4(a[383:256], b[321:320])
+dst[383:352] := SELECT4(a[383:256], b[353:352])
+dst[415:384] := SELECT4(a[511:384], b[385:384])
+dst[447:416] := SELECT4(a[511:384], b[417:416])
+dst[479:448] := SELECT4(a[511:384], b[449:448])
+dst[511:480] := SELECT4(a[511:384], b[481:480])
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPERMILPS" form="zmm, zmm, zmm" xed="VPERMILPS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_permutex_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shuffle double-precision (64-bit) floating-point elements in "a" within 256-bit lanes using the control in "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE SELECT4(src, control) {
+ CASE(control[1:0]) OF
+ 0: tmp[63:0] := src[63:0]
+ 1: tmp[63:0] := src[127:64]
+ 2: tmp[63:0] := src[191:128]
+ 3: tmp[63:0] := src[255:192]
+ ESAC
+ RETURN tmp[63:0]
+}
+tmp_dst[63:0] := SELECT4(a[255:0], imm8[1:0])
+tmp_dst[127:64] := SELECT4(a[255:0], imm8[3:2])
+tmp_dst[191:128] := SELECT4(a[255:0], imm8[5:4])
+tmp_dst[255:192] := SELECT4(a[255:0], imm8[7:6])
+tmp_dst[319:256] := SELECT4(a[511:256], imm8[1:0])
+tmp_dst[383:320] := SELECT4(a[511:256], imm8[3:2])
+tmp_dst[447:384] := SELECT4(a[511:256], imm8[5:4])
+tmp_dst[511:448] := SELECT4(a[511:256], imm8[7:6])
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := tmp_dst[i+63:i]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPERMPD" form="zmm {k}, zmm, imm8" xed="VPERMPD_ZMMf64_MASKmskw_ZMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_permutexvar_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="idx" etype="UI64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Shuffle double-precision (64-bit) floating-point elements in "a" across lanes using the corresponding index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ id := idx[i+2:i]*64
+ IF k[j]
+ dst[i+63:i] := a[id+63:id]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPERMPD" form="zmm {k}, zmm, zmm" xed="VPERMPD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_permutex_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shuffle double-precision (64-bit) floating-point elements in "a" within 256-bit lanes using the control in "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE SELECT4(src, control) {
+ CASE(control[1:0]) OF
+ 0: tmp[63:0] := src[63:0]
+ 1: tmp[63:0] := src[127:64]
+ 2: tmp[63:0] := src[191:128]
+ 3: tmp[63:0] := src[255:192]
+ ESAC
+ RETURN tmp[63:0]
+}
+tmp_dst[63:0] := SELECT4(a[255:0], imm8[1:0])
+tmp_dst[127:64] := SELECT4(a[255:0], imm8[3:2])
+tmp_dst[191:128] := SELECT4(a[255:0], imm8[5:4])
+tmp_dst[255:192] := SELECT4(a[255:0], imm8[7:6])
+tmp_dst[319:256] := SELECT4(a[511:256], imm8[1:0])
+tmp_dst[383:320] := SELECT4(a[511:256], imm8[3:2])
+tmp_dst[447:384] := SELECT4(a[511:256], imm8[5:4])
+tmp_dst[511:448] := SELECT4(a[511:256], imm8[7:6])
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := tmp_dst[i+63:i]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPERMPD" form="zmm {z}, zmm, imm8" xed="VPERMPD_ZMMf64_MASKmskw_ZMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_permutexvar_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="idx" etype="UI64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Shuffle double-precision (64-bit) floating-point elements in "a" across lanes using the corresponding index in "idx", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ id := idx[i+2:i]*64
+ IF k[j]
+ dst[i+63:i] := a[id+63:id]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPERMPD" form="zmm {z}, zmm, zmm" xed="VPERMPD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_permutex_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shuffle double-precision (64-bit) floating-point elements in "a" within 256-bit lanes using the control in "imm8", and store the results in "dst".</description>
+ <operation>
+DEFINE SELECT4(src, control) {
+ CASE(control[1:0]) OF
+ 0: tmp[63:0] := src[63:0]
+ 1: tmp[63:0] := src[127:64]
+ 2: tmp[63:0] := src[191:128]
+ 3: tmp[63:0] := src[255:192]
+ ESAC
+ RETURN tmp[63:0]
+}
+dst[63:0] := SELECT4(a[255:0], imm8[1:0])
+dst[127:64] := SELECT4(a[255:0], imm8[3:2])
+dst[191:128] := SELECT4(a[255:0], imm8[5:4])
+dst[255:192] := SELECT4(a[255:0], imm8[7:6])
+dst[319:256] := SELECT4(a[511:256], imm8[1:0])
+dst[383:320] := SELECT4(a[511:256], imm8[3:2])
+dst[447:384] := SELECT4(a[511:256], imm8[5:4])
+dst[511:448] := SELECT4(a[511:256], imm8[7:6])
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPERMPD" form="zmm, zmm, imm8" xed="VPERMPD_ZMMf64_MASKmskw_ZMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_permutexvar_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512i" varname="idx" etype="UI64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Shuffle double-precision (64-bit) floating-point elements in "a" across lanes using the corresponding index in "idx", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ id := idx[i+2:i]*64
+ dst[i+63:i] := a[id+63:id]
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPERMPD" form="zmm, zmm, zmm" xed="VPERMPD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_permutexvar_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="idx" etype="UI32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Shuffle single-precision (32-bit) floating-point elements in "a" across lanes using the corresponding index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ id := idx[i+3:i]*32
+ IF k[j]
+ dst[i+31:i] := a[id+31:id]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPERMPS" form="zmm {k}, zmm, zmm" xed="VPERMPS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_permutexvar_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="idx" etype="UI32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Shuffle single-precision (32-bit) floating-point elements in "a" across lanes using the corresponding index in "idx", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ id := idx[i+3:i]*32
+ IF k[j]
+ dst[i+31:i] := a[id+31:id]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPERMPS" form="zmm {z}, zmm, zmm" xed="VPERMPS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_permutexvar_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512i" varname="idx" etype="UI32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Shuffle single-precision (32-bit) floating-point elements in "a" across lanes using the corresponding index in "idx", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ id := idx[i+3:i]*32
+ dst[i+31:i] := a[id+31:id]
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPERMPS" form="zmm, zmm, zmm" xed="VPERMPS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_permutex_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shuffle 64-bit integers in "a" within 256-bit lanes using the control in "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE SELECT4(src, control) {
+ CASE(control[1:0]) OF
+ 0: tmp[63:0] := src[63:0]
+ 1: tmp[63:0] := src[127:64]
+ 2: tmp[63:0] := src[191:128]
+ 3: tmp[63:0] := src[255:192]
+ ESAC
+ RETURN tmp[63:0]
+}
+tmp_dst[63:0] := SELECT4(a[255:0], imm8[1:0])
+tmp_dst[127:64] := SELECT4(a[255:0], imm8[3:2])
+tmp_dst[191:128] := SELECT4(a[255:0], imm8[5:4])
+tmp_dst[255:192] := SELECT4(a[255:0], imm8[7:6])
+tmp_dst[319:256] := SELECT4(a[511:256], imm8[1:0])
+tmp_dst[383:320] := SELECT4(a[511:256], imm8[3:2])
+tmp_dst[447:384] := SELECT4(a[511:256], imm8[5:4])
+tmp_dst[511:448] := SELECT4(a[511:256], imm8[7:6])
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := tmp_dst[i+63:i]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPERMQ" form="zmm {k}, zmm, imm8" xed="VPERMQ_ZMMu64_MASKmskw_ZMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_permutexvar_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="idx" etype="UI64"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <description>Shuffle 64-bit integers in "a" across lanes using the corresponding index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ id := idx[i+2:i]*64
+ IF k[j]
+ dst[i+63:i] := a[id+63:id]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPERMQ" form="zmm {k}, zmm, zmm" xed="VPERMQ_ZMMu64_MASKmskw_ZMMu64_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_permutex_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shuffle 64-bit integers in "a" within 256-bit lanes using the control in "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE SELECT4(src, control) {
+ CASE(control[1:0]) OF
+ 0: tmp[63:0] := src[63:0]
+ 1: tmp[63:0] := src[127:64]
+ 2: tmp[63:0] := src[191:128]
+ 3: tmp[63:0] := src[255:192]
+ ESAC
+ RETURN tmp[63:0]
+}
+tmp_dst[63:0] := SELECT4(a[255:0], imm8[1:0])
+tmp_dst[127:64] := SELECT4(a[255:0], imm8[3:2])
+tmp_dst[191:128] := SELECT4(a[255:0], imm8[5:4])
+tmp_dst[255:192] := SELECT4(a[255:0], imm8[7:6])
+tmp_dst[319:256] := SELECT4(a[511:256], imm8[1:0])
+tmp_dst[383:320] := SELECT4(a[511:256], imm8[3:2])
+tmp_dst[447:384] := SELECT4(a[511:256], imm8[5:4])
+tmp_dst[511:448] := SELECT4(a[511:256], imm8[7:6])
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := tmp_dst[i+63:i]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPERMQ" form="zmm {z}, zmm, imm8" xed="VPERMQ_ZMMu64_MASKmskw_ZMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_permutexvar_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="idx" etype="UI64"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <description>Shuffle 64-bit integers in "a" across lanes using the corresponding index in "idx", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ id := idx[i+2:i]*64
+ IF k[j]
+ dst[i+63:i] := a[id+63:id]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPERMQ" form="zmm {z}, zmm, zmm" xed="VPERMQ_ZMMu64_MASKmskw_ZMMu64_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_permutex_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shuffle 64-bit integers in "a" within 256-bit lanes using the control in "imm8", and store the results in "dst".</description>
+ <operation>
+DEFINE SELECT4(src, control) {
+ CASE(control[1:0]) OF
+ 0: tmp[63:0] := src[63:0]
+ 1: tmp[63:0] := src[127:64]
+ 2: tmp[63:0] := src[191:128]
+ 3: tmp[63:0] := src[255:192]
+ ESAC
+ RETURN tmp[63:0]
+}
+dst[63:0] := SELECT4(a[255:0], imm8[1:0])
+dst[127:64] := SELECT4(a[255:0], imm8[3:2])
+dst[191:128] := SELECT4(a[255:0], imm8[5:4])
+dst[255:192] := SELECT4(a[255:0], imm8[7:6])
+dst[319:256] := SELECT4(a[511:256], imm8[1:0])
+dst[383:320] := SELECT4(a[511:256], imm8[3:2])
+dst[447:384] := SELECT4(a[511:256], imm8[5:4])
+dst[511:448] := SELECT4(a[511:256], imm8[7:6])
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPERMQ" form="zmm, zmm, imm8" xed="VPERMQ_ZMMu64_MASKmskw_ZMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_permutexvar_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="idx" etype="UI64"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <description>Shuffle 64-bit integers in "a" across lanes using the corresponding index in "idx", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ id := idx[i+2:i]*64
+ dst[i+63:i] := a[id+63:id]
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPERMQ" form="zmm, zmm, zmm" xed="VPERMQ_ZMMu64_MASKmskw_ZMMu64_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_expand_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="src" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <description>Load contiguous active 32-bit integers from "a" (those with their respective bit set in mask "k"), and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+m := 0
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := a[m+31:m]
+ m := m + 32
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPEXPANDD" form="zmm {k}, zmm" xed="VPEXPANDD_ZMMu32_MASKmskw_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_expandloadu_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Load</category>
+ <category>Swizzle</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="src" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="void const*" varname="mem_addr" etype="UI32" memwidth="512"/>
+ <description>Load contiguous active 32-bit integers from unaligned memory at "mem_addr" (those with their respective bit set in mask "k"), and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+m := 0
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := MEM[mem_addr+m+31:mem_addr+m]
+ m := m + 32
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPEXPANDD" form="zmm {k}, m32" xed="VPEXPANDD_ZMMu32_MASKmskw_MEMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_expand_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <description>Load contiguous active 32-bit integers from "a" (those with their respective bit set in mask "k"), and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+m := 0
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := a[m+31:m]
+ m := m + 32
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPEXPANDD" form="zmm {z}, zmm" xed="VPEXPANDD_ZMMu32_MASKmskw_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_expandloadu_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Load</category>
+ <category>Swizzle</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="void const*" varname="mem_addr" etype="UI32" memwidth="512"/>
+ <description>Load contiguous active 32-bit integers from unaligned memory at "mem_addr" (those with their respective bit set in mask "k"), and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+m := 0
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := MEM[mem_addr+m+31:mem_addr+m]
+ m := m + 32
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPEXPANDD" form="zmm {z}, m32" xed="VPEXPANDD_ZMMu32_MASKmskw_MEMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_expand_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <description>Load contiguous active 64-bit integers from "a" (those with their respective bit set in mask "k"), and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+m := 0
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := a[m+63:m]
+ m := m + 64
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPEXPANDQ" form="zmm {k}, zmm" xed="VPEXPANDQ_ZMMu64_MASKmskw_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_expandloadu_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Load</category>
+ <category>Swizzle</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="void const*" varname="mem_addr" etype="UI64" memwidth="512"/>
+ <description>Load contiguous active 64-bit integers from unaligned memory at "mem_addr" (those with their respective bit set in mask "k"), and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+m := 0
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := MEM[mem_addr+m+63:mem_addr+m]
+ m := m + 64
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPEXPANDQ" form="zmm {k}, m64" xed="VPEXPANDQ_ZMMu64_MASKmskw_MEMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_expand_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <description>Load contiguous active 64-bit integers from "a" (those with their respective bit set in mask "k"), and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+m := 0
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := a[m+63:m]
+ m := m + 64
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPEXPANDQ" form="zmm {z}, zmm" xed="VPEXPANDQ_ZMMu64_MASKmskw_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_expandloadu_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Load</category>
+ <category>Swizzle</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="void const*" varname="mem_addr" etype="UI64" memwidth="512"/>
+ <description>Load contiguous active 64-bit integers from unaligned memory at "mem_addr" (those with their respective bit set in mask "k"), and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+m := 0
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := MEM[mem_addr+m+63:mem_addr+m]
+ m := m + 64
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPEXPANDQ" form="zmm {z}, m64" xed="VPEXPANDQ_ZMMu64_MASKmskw_MEMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_i32gather_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Load</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m256i" varname="vindex" etype="SI32"/>
+ <parameter type="void const*" varname="base_addr" etype="UI64"/>
+ <parameter type="int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <description>Gather 64-bit integers from memory using 32-bit indices. 64-bit elements are loaded from addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst". "scale" should be 1, 2, 4 or 8.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ m := j*32
+ addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale)
+ dst[i+63:i] := MEM[addr+63:addr]
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPGATHERDQ" form="zmm, vm32y" xed="VPGATHERDQ_ZMMu64_MASKmskw_MEMu64_AVX512_VL512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_i32gather_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Load</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="vindex" etype="SI32"/>
+ <parameter type="void const*" varname="base_addr" etype="UI64"/>
+ <parameter type="int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <description>Gather 64-bit integers from memory using 32-bit indices. 64-bit elements are loaded from addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ m := j*32
+ IF k[j]
+  addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale)
+ dst[i+63:i] := MEM[addr+63:addr]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPGATHERDQ" form="zmm {k}, vm32y" xed="VPGATHERDQ_ZMMu64_MASKmskw_MEMu64_AVX512_VL512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_i64gather_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Load</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="vindex" etype="SI64"/>
+ <parameter type="void const*" varname="base_addr" etype="UI32"/>
+ <parameter type="int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <description>Gather 32-bit integers from memory using 64-bit indices. 32-bit elements are loaded from addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst". "scale" should be 1, 2, 4 or 8.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ m := j*64
+ addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale)
+ dst[i+31:i] := MEM[addr+31:addr]
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPGATHERQD" form="ymm, vm64z" xed="VPGATHERQD_YMMu32_MASKmskw_MEMu32_AVX512_VL512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_i64gather_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Load</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="vindex" etype="SI64"/>
+ <parameter type="void const*" varname="base_addr" etype="UI32"/>
+ <parameter type="int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <description>Gather 32-bit integers from memory using 64-bit indices. 32-bit elements are loaded from addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ m := j*64
+ IF k[j]
+  addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale)
+ dst[i+31:i] := MEM[addr+31:addr]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPGATHERQD" form="ymm {k}, vm64z" xed="VPGATHERQD_YMMu32_MASKmskw_MEMu32_AVX512_VL512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_i64gather_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Load</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="vindex" etype="SI64"/>
+ <parameter type="void const*" varname="base_addr" etype="UI64"/>
+ <parameter type="int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <description>Gather 64-bit integers from memory using 64-bit indices. 64-bit elements are loaded from addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst". "scale" should be 1, 2, 4 or 8.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ m := j*64
+ addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale)
+ dst[i+63:i] := MEM[addr+63:addr]
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPGATHERQQ" form="zmm, vm64z" xed="VPGATHERQQ_ZMMu64_MASKmskw_MEMu64_AVX512_VL512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_i64gather_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Load</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="vindex" etype="SI64"/>
+ <parameter type="void const*" varname="base_addr" etype="UI64"/>
+ <parameter type="int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <description>Gather 64-bit integers from memory using 64-bit indices. 64-bit elements are loaded from addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ m := j*64
+ IF k[j]
+  addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale)
+ dst[i+63:i] := MEM[addr+63:addr]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPGATHERQQ" form="zmm {k}, vm64z" xed="VPGATHERQQ_ZMMu64_MASKmskw_MEMu64_AVX512_VL512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_max_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI32"/>
+ <parameter type="__m512i" varname="b" etype="SI32"/>
+ <description>Compare packed signed 32-bit integers in "a" and "b", and store packed maximum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := MAX(a[i+31:i], b[i+31:i])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMAXSD" form="zmm {z}, zmm, zmm" xed="VPMAXSD_ZMMi32_MASKmskw_ZMMi32_ZMMi32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_max_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI64"/>
+ <parameter type="__m512i" varname="b" etype="SI64"/>
+ <description>Compare packed signed 64-bit integers in "a" and "b", and store packed maximum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := MAX(a[i+63:i], b[i+63:i])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMAXSQ" form="zmm {k}, zmm, zmm" xed="VPMAXSQ_ZMMi64_MASKmskw_ZMMi64_ZMMi64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_max_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI64"/>
+ <parameter type="__m512i" varname="b" etype="SI64"/>
+ <description>Compare packed signed 64-bit integers in "a" and "b", and store packed maximum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := MAX(a[i+63:i], b[i+63:i])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMAXSQ" form="zmm {z}, zmm, zmm" xed="VPMAXSQ_ZMMi64_MASKmskw_ZMMi64_ZMMi64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_max_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="a" etype="SI64"/>
+ <parameter type="__m512i" varname="b" etype="SI64"/>
+ <description>Compare packed signed 64-bit integers in "a" and "b", and store packed maximum values in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ dst[i+63:i] := MAX(a[i+63:i], b[i+63:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMAXSQ" form="zmm, zmm, zmm" xed="VPMAXSQ_ZMMi64_MASKmskw_ZMMi64_ZMMi64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_max_epu32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="__m512i" varname="b" etype="UI32"/>
+ <description>Compare packed unsigned 32-bit integers in "a" and "b", and store packed maximum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := MAX(a[i+31:i], b[i+31:i])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMAXUD" form="zmm {z}, zmm, zmm" xed="VPMAXUD_ZMMu32_MASKmskw_ZMMu32_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_max_epu64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="__m512i" varname="b" etype="UI64"/>
+ <description>Compare packed unsigned 64-bit integers in "a" and "b", and store packed maximum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := MAX(a[i+63:i], b[i+63:i])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMAXUQ" form="zmm {k}, zmm, zmm" xed="VPMAXUQ_ZMMu64_MASKmskw_ZMMu64_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_max_epu64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="__m512i" varname="b" etype="UI64"/>
+ <description>Compare packed unsigned 64-bit integers in "a" and "b", and store packed maximum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := MAX(a[i+63:i], b[i+63:i])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMAXUQ" form="zmm {z}, zmm, zmm" xed="VPMAXUQ_ZMMu64_MASKmskw_ZMMu64_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_max_epu64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="__m512i" varname="b" etype="UI64"/>
+ <description>Compare packed unsigned 64-bit integers in "a" and "b", and store packed maximum values in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ dst[i+63:i] := MAX(a[i+63:i], b[i+63:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMAXUQ" form="zmm, zmm, zmm" xed="VPMAXUQ_ZMMu64_MASKmskw_ZMMu64_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_min_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI32"/>
+ <parameter type="__m512i" varname="b" etype="SI32"/>
+ <description>Compare packed signed 32-bit integers in "a" and "b", and store packed minimum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := MIN(a[i+31:i], b[i+31:i])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMINSD" form="zmm {z}, zmm, zmm" xed="VPMINSD_ZMMi32_MASKmskw_ZMMi32_ZMMi32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_min_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI64"/>
+ <parameter type="__m512i" varname="b" etype="SI64"/>
+ <description>Compare packed signed 64-bit integers in "a" and "b", and store packed minimum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := MIN(a[i+63:i], b[i+63:i])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMINSQ" form="zmm {k}, zmm, zmm" xed="VPMINSQ_ZMMi64_MASKmskw_ZMMi64_ZMMi64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_min_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI64"/>
+ <parameter type="__m512i" varname="b" etype="SI64"/>
+ <description>Compare packed signed 64-bit integers in "a" and "b", and store packed minimum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := MIN(a[i+63:i], b[i+63:i])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMINSQ" form="zmm {z}, zmm, zmm" xed="VPMINSQ_ZMMi64_MASKmskw_ZMMi64_ZMMi64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_min_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="a" etype="SI64"/>
+ <parameter type="__m512i" varname="b" etype="SI64"/>
+ <description>Compare packed signed 64-bit integers in "a" and "b", and store packed minimum values in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ dst[i+63:i] := MIN(a[i+63:i], b[i+63:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMINSQ" form="zmm, zmm, zmm" xed="VPMINSQ_ZMMi64_MASKmskw_ZMMi64_ZMMi64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_min_epu32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="__m512i" varname="b" etype="UI32"/>
+ <description>Compare packed unsigned 32-bit integers in "a" and "b", and store packed minimum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := MIN(a[i+31:i], b[i+31:i])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMINUD" form="zmm {z}, zmm, zmm" xed="VPMINUD_ZMMu32_MASKmskw_ZMMu32_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_min_epu64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="__m512i" varname="b" etype="UI64"/>
+ <description>Compare packed unsigned 64-bit integers in "a" and "b", and store packed minimum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := MIN(a[i+63:i], b[i+63:i])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMINUQ" form="zmm {k}, zmm, zmm" xed="VPMINUQ_ZMMu64_MASKmskw_ZMMu64_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_min_epu64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="__m512i" varname="b" etype="UI64"/>
+ <description>Compare packed unsigned 64-bit integers in "a" and "b", and store packed minimum values in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := MIN(a[i+63:i], b[i+63:i])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMINUQ" form="zmm {z}, zmm, zmm" xed="VPMINUQ_ZMMu64_MASKmskw_ZMMu64_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_min_epu64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="__m512i" varname="b" etype="UI64"/>
+ <description>Compare packed unsigned 64-bit integers in "a" and "b", and store packed minimum values in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ dst[i+63:i] := MIN(a[i+63:i], b[i+63:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMINUQ" form="zmm, zmm, zmm" xed="VPMINUQ_ZMMu64_MASKmskw_ZMMu64_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_cvtepi32_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <description>Convert packed 32-bit integers in "a" to packed 8-bit integers with truncation, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := 32*j
+ k := 8*j
+ dst[k+7:k] := Truncate8(a[i+31:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMOVDB" form="xmm, zmm" xed="VPMOVDB_XMMu8_MASKmskw_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cvtepi32_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__m128i" varname="src" etype="UI8"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <description>Convert packed 32-bit integers in "a" to packed 8-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := 32*j
+ l := 8*j
+ IF k[j]
+ dst[l+7:l] := Truncate8(a[i+31:i])
+ ELSE
+ dst[l+7:l] := src[l+7:l]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMOVDB" form="xmm {k}, zmm" xed="VPMOVDB_XMMu8_MASKmskw_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cvtepi32_storeu_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="base_addr" etype="UI8" memwidth="128"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <description>Convert packed 32-bit integers in "a" to packed 8-bit integers with truncation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr".</description>
+ <operation>
+FOR j := 0 to 15
+ i := 32*j
+ l := 8*j
+ IF k[j]
+ MEM[base_addr+l+7:base_addr+l] := Truncate8(a[i+31:i])
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VPMOVDB" form="m128 {k}, zmm" xed="VPMOVDB_MEMu8_MASKmskw_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_cvtepi32_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <description>Convert packed 32-bit integers in "a" to packed 8-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := 32*j
+ l := 8*j
+ IF k[j]
+ dst[l+7:l] := Truncate8(a[i+31:i])
+ ELSE
+ dst[l+7:l] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMOVDB" form="xmm {z}, zmm" xed="VPMOVDB_XMMu8_MASKmskw_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_cvtepi32_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <description>Convert packed 32-bit integers in "a" to packed 16-bit integers with truncation, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := 32*j
+ k := 16*j
+ dst[k+15:k] := Truncate16(a[i+31:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMOVDW" form="ymm, zmm" xed="VPMOVDW_YMMu16_MASKmskw_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cvtepi32_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__m256i" varname="src" etype="UI16"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <description>Convert packed 32-bit integers in "a" to packed 16-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := 32*j
+ l := 16*j
+ IF k[j]
+ dst[l+15:l] := Truncate16(a[i+31:i])
+ ELSE
+ dst[l+15:l] := src[l+15:l]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMOVDW" form="ymm {k}, zmm" xed="VPMOVDW_YMMu16_MASKmskw_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cvtepi32_storeu_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="base_addr" etype="UI16" memwidth="256"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <description>Convert packed 32-bit integers in "a" to packed 16-bit integers with truncation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr".</description>
+ <operation>
+FOR j := 0 to 15
+ i := 32*j
+ l := 16*j
+ IF k[j]
+ MEM[base_addr+l+15:base_addr+l] := Truncate16(a[i+31:i])
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VPMOVDW" form="m256 {k}, zmm" xed="VPMOVDW_MEMu16_MASKmskw_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_cvtepi32_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <description>Convert packed 32-bit integers in "a" to packed 16-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := 32*j
+ l := 16*j
+ IF k[j]
+ dst[l+15:l] := Truncate16(a[i+31:i])
+ ELSE
+ dst[l+15:l] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMOVDW" form="ymm {z}, zmm" xed="VPMOVDW_YMMu16_MASKmskw_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_cvtepi64_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <description>Convert packed 64-bit integers in "a" to packed 8-bit integers with truncation, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := 64*j
+ k := 8*j
+ dst[k+7:k] := Truncate8(a[i+63:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMOVQB" form="xmm, zmm" xed="VPMOVQB_XMMu8_MASKmskw_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cvtepi64_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__m128i" varname="src" etype="UI8"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <description>Convert packed 64-bit integers in "a" to packed 8-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := 64*j
+ l := 8*j
+ IF k[j]
+ dst[l+7:l] := Truncate8(a[i+63:i])
+ ELSE
+ dst[l+7:l] := src[l+7:l]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMOVQB" form="xmm {k}, zmm" xed="VPMOVQB_XMMu8_MASKmskw_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cvtepi64_storeu_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="base_addr" etype="UI8" memwidth="64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <description>Convert packed 64-bit integers in "a" to packed 8-bit integers with truncation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr".</description>
+ <operation>
+FOR j := 0 to 7
+ i := 64*j
+ l := 8*j
+ IF k[j]
+ MEM[base_addr+l+7:base_addr+l] := Truncate8(a[i+63:i])
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VPMOVQB" form="m64 {k}, zmm" xed="VPMOVQB_MEMu8_MASKmskw_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_cvtepi64_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <description>Convert packed 64-bit integers in "a" to packed 8-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := 64*j
+ l := 8*j
+ IF k[j]
+ dst[l+7:l] := Truncate8(a[i+63:i])
+ ELSE
+ dst[l+7:l] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMOVQB" form="xmm {z}, zmm" xed="VPMOVQB_XMMu8_MASKmskw_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_cvtepi64_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <description>Convert packed 64-bit integers in "a" to packed 32-bit integers with truncation, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := 64*j
+ k := 32*j
+ dst[k+31:k] := Truncate32(a[i+63:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMOVQD" form="ymm, zmm" xed="VPMOVQD_YMMu32_MASKmskw_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cvtepi64_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <description>Convert packed 64-bit integers in "a" to packed 32-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := 64*j
+ l := 32*j
+ IF k[j]
+ dst[l+31:l] := Truncate32(a[i+63:i])
+ ELSE
+ dst[l+31:l] := src[l+31:l]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMOVQD" form="ymm {k}, zmm" xed="VPMOVQD_YMMu32_MASKmskw_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cvtepi64_storeu_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="base_addr" etype="UI32" memwidth="256"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <description>Convert packed 64-bit integers in "a" to packed 32-bit integers with truncation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr".</description>
+ <operation>
+FOR j := 0 to 7
+ i := 64*j
+ l := 32*j
+ IF k[j]
+ MEM[base_addr+l+31:base_addr+l] := Truncate32(a[i+63:i])
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VPMOVQD" form="m256 {k}, zmm" xed="VPMOVQD_MEMu32_MASKmskw_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_cvtepi64_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <description>Convert packed 64-bit integers in "a" to packed 32-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := 64*j
+ l := 32*j
+ IF k[j]
+ dst[l+31:l] := Truncate32(a[i+63:i])
+ ELSE
+ dst[l+31:l] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMOVQD" form="ymm {z}, zmm" xed="VPMOVQD_YMMu32_MASKmskw_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_cvtepi64_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <description>Convert packed 64-bit integers in "a" to packed 16-bit integers with truncation, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := 64*j
+ k := 16*j
+ dst[k+15:k] := Truncate16(a[i+63:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMOVQW" form="xmm, zmm" xed="VPMOVQW_XMMu16_MASKmskw_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cvtepi64_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__m128i" varname="src" etype="UI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <description>Convert packed 64-bit integers in "a" to packed 16-bit integers with truncation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := 64*j
+ l := 16*j
+ IF k[j]
+ dst[l+15:l] := Truncate16(a[i+63:i])
+ ELSE
+ dst[l+15:l] := src[l+15:l]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMOVQW" form="xmm {k}, zmm" xed="VPMOVQW_XMMu16_MASKmskw_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cvtepi64_storeu_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="base_addr" etype="UI16" memwidth="128"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <description>Convert packed 64-bit integers in "a" to packed 16-bit integers with truncation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr".</description>
+ <operation>
+FOR j := 0 to 7
+ i := 64*j
+ l := 16*j
+ IF k[j]
+ MEM[base_addr+l+15:base_addr+l] := Truncate16(a[i+63:i])
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VPMOVQW" form="m128 {k}, zmm" xed="VPMOVQW_MEMu16_MASKmskw_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_cvtepi64_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <description>Convert packed 64-bit integers in "a" to packed 16-bit integers with truncation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := 64*j
+ l := 16*j
+ IF k[j]
+ dst[l+15:l] := Truncate16(a[i+63:i])
+ ELSE
+ dst[l+15:l] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMOVQW" form="xmm {z}, zmm" xed="VPMOVQW_XMMu16_MASKmskw_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_cvtsepi32_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="SI8"/>
+ <parameter type="__m512i" varname="a" etype="SI32"/>
+ <description>Convert packed signed 32-bit integers in "a" to packed 8-bit integers with signed saturation, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := 32*j
+ k := 8*j
+ dst[k+7:k] := Saturate8(a[i+31:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMOVSDB" form="xmm, zmm" xed="VPMOVSDB_XMMi8_MASKmskw_ZMMi32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cvtsepi32_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="SI8"/>
+ <parameter type="__m128i" varname="src" etype="SI8"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI32"/>
+ <description>Convert packed signed 32-bit integers in "a" to packed 8-bit integers with signed saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := 32*j
+ l := 8*j
+ IF k[j]
+ dst[l+7:l] := Saturate8(a[i+31:i])
+ ELSE
+ dst[l+7:l] := src[l+7:l]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMOVSDB" form="xmm {k}, zmm" xed="VPMOVSDB_XMMi8_MASKmskw_ZMMi32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cvtsepi32_storeu_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="base_addr" etype="SI8" memwidth="128"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI32"/>
+ <description>Convert packed signed 32-bit integers in "a" to packed 8-bit integers with signed saturation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr".</description>
+ <operation>
+FOR j := 0 to 15
+ i := 32*j
+ l := 8*j
+ IF k[j]
+ MEM[base_addr+l+7:base_addr+l] := Saturate8(a[i+31:i])
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VPMOVSDB" form="m128 {k}, zmm" xed="VPMOVSDB_MEMi8_MASKmskw_ZMMi32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_cvtsepi32_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="SI8"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI32"/>
+ <description>Convert packed signed 32-bit integers in "a" to packed 8-bit integers with signed saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := 32*j
+ l := 8*j
+ IF k[j]
+ dst[l+7:l] := Saturate8(a[i+31:i])
+ ELSE
+ dst[l+7:l] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMOVSDB" form="xmm {z}, zmm" xed="VPMOVSDB_XMMi8_MASKmskw_ZMMi32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_cvtsepi32_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m256i" varname="dst" etype="SI16"/>
+ <parameter type="__m512i" varname="a" etype="SI32"/>
+ <description>Convert packed signed 32-bit integers in "a" to packed 16-bit integers with signed saturation, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := 32*j
+ k := 16*j
+ dst[k+15:k] := Saturate16(a[i+31:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMOVSDW" form="ymm, zmm" xed="VPMOVSDW_YMMi16_MASKmskw_ZMMi32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cvtsepi32_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m256i" varname="dst" etype="SI16"/>
+ <parameter type="__m256i" varname="src" etype="SI16"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI32"/>
+ <description>Convert packed signed 32-bit integers in "a" to packed 16-bit integers with signed saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := 32*j
+ l := 16*j
+ IF k[j]
+ dst[l+15:l] := Saturate16(a[i+31:i])
+ ELSE
+ dst[l+15:l] := src[l+15:l]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMOVSDW" form="ymm {k}, zmm" xed="VPMOVSDW_YMMi16_MASKmskw_ZMMi32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cvtsepi32_storeu_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="base_addr" etype="SI16" memwidth="256"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI32"/>
+ <description>Convert packed signed 32-bit integers in "a" to packed 16-bit integers with signed saturation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr".</description>
+ <operation>
+FOR j := 0 to 15
+ i := 32*j
+ l := 16*j
+ IF k[j]
+ MEM[base_addr+l+15:base_addr+l] := Saturate16(a[i+31:i])
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VPMOVSDW" form="m256 {k}, zmm" xed="VPMOVSDW_MEMi16_MASKmskw_ZMMi32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_cvtsepi32_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m256i" varname="dst" etype="SI16"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI32"/>
+ <description>Convert packed signed 32-bit integers in "a" to packed 16-bit integers with signed saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := 32*j
+ l := 16*j
+ IF k[j]
+ dst[l+15:l] := Saturate16(a[i+31:i])
+ ELSE
+ dst[l+15:l] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMOVSDW" form="ymm {z}, zmm" xed="VPMOVSDW_YMMi16_MASKmskw_ZMMi32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_cvtsepi64_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="SI8"/>
+ <parameter type="__m512i" varname="a" etype="SI64"/>
+ <description>Convert packed signed 64-bit integers in "a" to packed 8-bit integers with signed saturation, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := 64*j
+ k := 8*j
+ dst[k+7:k] := Saturate8(a[i+63:i])
+ENDFOR
+dst[MAX:64] := 0
+ </operation>
+ <instruction name="VPMOVSQB" form="xmm, zmm" xed="VPMOVSQB_XMMi8_MASKmskw_ZMMi64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cvtsepi64_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="SI8"/>
+ <parameter type="__m128i" varname="src" etype="SI8"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI64"/>
+ <description>Convert packed signed 64-bit integers in "a" to packed 8-bit integers with signed saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := 64*j
+ l := 8*j
+ IF k[j]
+ dst[l+7:l] := Saturate8(a[i+63:i])
+ ELSE
+ dst[l+7:l] := src[l+7:l]
+ FI
+ENDFOR
+dst[MAX:64] := 0
+ </operation>
+ <instruction name="VPMOVSQB" form="xmm {k}, zmm" xed="VPMOVSQB_XMMi8_MASKmskw_ZMMi64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cvtsepi64_storeu_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="base_addr" etype="SI8" memwidth="64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI64"/>
+ <description>Convert packed signed 64-bit integers in "a" to packed 8-bit integers with signed saturation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr".</description>
+ <operation>
+FOR j := 0 to 7
+ i := 64*j
+ l := 8*j
+ IF k[j]
+ MEM[base_addr+l+7:base_addr+l] := Saturate8(a[i+63:i])
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VPMOVSQB" form="m64 {k}, zmm" xed="VPMOVSQB_MEMi8_MASKmskw_ZMMi64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_cvtsepi64_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="SI8"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI64"/>
+ <description>Convert packed signed 64-bit integers in "a" to packed 8-bit integers with signed saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := 64*j
+ l := 8*j
+ IF k[j]
+ dst[l+7:l] := Saturate8(a[i+63:i])
+ ELSE
+ dst[l+7:l] := 0
+ FI
+ENDFOR
+dst[MAX:64] := 0
+ </operation>
+ <instruction name="VPMOVSQB" form="xmm {z}, zmm" xed="VPMOVSQB_XMMi8_MASKmskw_ZMMi64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_cvtsepi64_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m256i" varname="dst" etype="SI32"/>
+ <parameter type="__m512i" varname="a" etype="SI64"/>
+ <description>Convert packed signed 64-bit integers in "a" to packed 32-bit integers with signed saturation, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := 64*j
+ k := 32*j
+ dst[k+31:k] := Saturate32(a[i+63:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMOVSQD" form="ymm, zmm" xed="VPMOVSQD_YMMi32_MASKmskw_ZMMi64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cvtsepi64_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m256i" varname="dst" etype="SI32"/>
+ <parameter type="__m256i" varname="src" etype="SI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI64"/>
+ <description>Convert packed signed 64-bit integers in "a" to packed 32-bit integers with signed saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := 64*j
+ l := 32*j
+ IF k[j]
+ dst[l+31:l] := Saturate32(a[i+63:i])
+ ELSE
+ dst[l+31:l] := src[l+31:l]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMOVSQD" form="ymm {k}, zmm" xed="VPMOVSQD_YMMi32_MASKmskw_ZMMi64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cvtsepi64_storeu_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="base_addr" etype="SI32" memwidth="256"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI64"/>
+ <description>Convert packed signed 64-bit integers in "a" to packed 32-bit integers with signed saturation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr".</description>
+ <operation>
+FOR j := 0 to 7
+ i := 64*j
+ l := 32*j
+ IF k[j]
+ MEM[base_addr+l+31:base_addr+l] := Saturate32(a[i+63:i])
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VPMOVSQD" form="m256 {k}, zmm" xed="VPMOVSQD_MEMi32_MASKmskw_ZMMi64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_cvtsepi64_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m256i" varname="dst" etype="SI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI64"/>
+ <description>Convert packed signed 64-bit integers in "a" to packed 32-bit integers with signed saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := 64*j
+ l := 32*j
+ IF k[j]
+ dst[l+31:l] := Saturate32(a[i+63:i])
+ ELSE
+ dst[l+31:l] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMOVSQD" form="ymm {z}, zmm" xed="VPMOVSQD_YMMi32_MASKmskw_ZMMi64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_cvtsepi64_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="SI16"/>
+ <parameter type="__m512i" varname="a" etype="SI64"/>
+ <description>Convert packed signed 64-bit integers in "a" to packed 16-bit integers with signed saturation, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := 64*j
+ k := 16*j
+ dst[k+15:k] := Saturate16(a[i+63:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMOVSQW" form="xmm, zmm" xed="VPMOVSQW_XMMi16_MASKmskw_ZMMi64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cvtsepi64_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="SI16"/>
+ <parameter type="__m128i" varname="src" etype="SI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI64"/>
+ <description>Convert packed signed 64-bit integers in "a" to packed 16-bit integers with signed saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := 64*j
+ l := 16*j
+ IF k[j]
+ dst[l+15:l] := Saturate16(a[i+63:i])
+ ELSE
+ dst[l+15:l] := src[l+15:l]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMOVSQW" form="xmm {k}, zmm" xed="VPMOVSQW_XMMi16_MASKmskw_ZMMi64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cvtsepi64_storeu_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="base_addr" etype="SI16" memwidth="128"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI64"/>
+ <description>Convert packed signed 64-bit integers in "a" to packed 16-bit integers with signed saturation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr".</description>
+ <operation>
+FOR j := 0 to 7
+ i := 64*j
+ l := 16*j
+ IF k[j]
+ MEM[base_addr+l+15:base_addr+l] := Saturate16(a[i+63:i])
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VPMOVSQW" form="m128 {k}, zmm" xed="VPMOVSQW_MEMi16_MASKmskw_ZMMi64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_cvtsepi64_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="SI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI64"/>
+ <description>Convert packed signed 64-bit integers in "a" to packed 16-bit integers with signed saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := 64*j
+ l := 16*j
+ IF k[j]
+ dst[l+15:l] := Saturate16(a[i+63:i])
+ ELSE
+ dst[l+15:l] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMOVSQW" form="xmm {z}, zmm" xed="VPMOVSQW_XMMi16_MASKmskw_ZMMi64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_cvtepi8_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m512i" varname="dst" etype="SI32"/>
+ <parameter type="__m128i" varname="a" etype="SI8"/>
+ <description>Sign extend packed 8-bit integers in "a" to packed 32-bit integers, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := 32*j
+ k := 8*j
+ dst[i+31:i] := SignExtend32(a[k+7:k])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMOVSXBD" form="zmm, xmm" xed="VPMOVSXBD_ZMMi32_MASKmskw_XMMi8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cvtepi8_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m512i" varname="dst" etype="SI32"/>
+ <parameter type="__m512i" varname="src" etype="SI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI8"/>
+ <description>Sign extend packed 8-bit integers in "a" to packed 32-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := 32*j
+ l := 8*j
+ IF k[j]
+ dst[i+31:i] := SignExtend32(a[l+7:l])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMOVSXBD" form="zmm {k}, xmm" xed="VPMOVSXBD_ZMMi32_MASKmskw_XMMi8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_cvtepi8_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m512i" varname="dst" etype="SI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI8"/>
+ <description>Sign extend packed 8-bit integers in "a" to packed 32-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := 32*j
+ l := 8*j
+ IF k[j]
+ dst[i+31:i] := SignExtend32(a[l+7:l])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMOVSXBD" form="zmm {z}, xmm" xed="VPMOVSXBD_ZMMi32_MASKmskw_XMMi8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_cvtepi8_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m512i" varname="dst" etype="SI64"/>
+ <parameter type="__m128i" varname="a" etype="SI8"/>
+ <description>Sign extend packed 8-bit integers in the low 8 bytes of "a" to packed 64-bit integers, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := 64*j
+ k := 8*j
+ dst[i+63:i] := SignExtend64(a[k+7:k])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMOVSXBQ" form="zmm, xmm" xed="VPMOVSXBQ_ZMMi64_MASKmskw_XMMi8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cvtepi8_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m512i" varname="dst" etype="SI64"/>
+ <parameter type="__m512i" varname="src" etype="SI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI8"/>
+ <description>Sign extend packed 8-bit integers in the low 8 bytes of "a" to packed 64-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := 64*j
+ l := 8*j
+ IF k[j]
+ dst[i+63:i] := SignExtend64(a[l+7:l])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMOVSXBQ" form="zmm {k}, xmm" xed="VPMOVSXBQ_ZMMi64_MASKmskw_XMMi8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_cvtepi8_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m512i" varname="dst" etype="SI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI8"/>
+ <description>Sign extend packed 8-bit integers in the low 8 bytes of "a" to packed 64-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := 64*j
+ l := 8*j
+ IF k[j]
+ dst[i+63:i] := SignExtend64(a[l+7:l])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMOVSXBQ" form="zmm {z}, xmm" xed="VPMOVSXBQ_ZMMi64_MASKmskw_XMMi8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_cvtepi32_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m512i" varname="dst" etype="SI64"/>
+ <parameter type="__m256i" varname="a" etype="SI32"/>
+ <description>Sign extend packed 32-bit integers in "a" to packed 64-bit integers, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := 64*j
+ k := 32*j
+ dst[i+63:i] := SignExtend64(a[k+31:k])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMOVSXDQ" form="zmm, ymm" xed="VPMOVSXDQ_ZMMi64_MASKmskw_YMMi32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cvtepi32_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m512i" varname="dst" etype="SI64"/>
+ <parameter type="__m512i" varname="src" etype="SI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI32"/>
+ <description>Sign extend packed 32-bit integers in "a" to packed 64-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := 64*j
+ l := 32*j
+ IF k[j]
+ dst[i+63:i] := SignExtend64(a[l+31:l])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMOVSXDQ" form="zmm {k}, ymm" xed="VPMOVSXDQ_ZMMi64_MASKmskw_YMMi32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_cvtepi32_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m512i" varname="dst" etype="SI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI32"/>
+ <description>Sign extend packed 32-bit integers in "a" to packed 64-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := 64*j
+ l := 32*j
+ IF k[j]
+ dst[i+63:i] := SignExtend64(a[l+31:l])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMOVSXDQ" form="zmm {z}, ymm" xed="VPMOVSXDQ_ZMMi64_MASKmskw_YMMi32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_cvtepi16_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m512i" varname="dst" etype="SI32"/>
+ <parameter type="__m256i" varname="a" etype="SI16"/>
+ <description>Sign extend packed 16-bit integers in "a" to packed 32-bit integers, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := 32*j
+ k := 16*j
+ dst[i+31:i] := SignExtend32(a[k+15:k])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMOVSXWD" form="zmm, ymm" xed="VPMOVSXWD_ZMMi32_MASKmskw_YMMi16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cvtepi16_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m512i" varname="dst" etype="SI32"/>
+ <parameter type="__m512i" varname="src" etype="SI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI16"/>
+ <description>Sign extend packed 16-bit integers in "a" to packed 32-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ l := j*16
+ IF k[j]
+ dst[i+31:i] := SignExtend32(a[l+15:l])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMOVSXWD" form="zmm {k}, ymm" xed="VPMOVSXWD_ZMMi32_MASKmskw_YMMi16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_cvtepi16_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m512i" varname="dst" etype="SI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI16"/>
+ <description>Sign extend packed 16-bit integers in "a" to packed 32-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := 32*j
+ l := 16*j
+ IF k[j]
+ dst[i+31:i] := SignExtend32(a[l+15:l])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMOVSXWD" form="zmm {z}, ymm" xed="VPMOVSXWD_ZMMi32_MASKmskw_YMMi16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_cvtepi16_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m512i" varname="dst" etype="SI64"/>
+ <parameter type="__m128i" varname="a" etype="SI16"/>
+ <description>Sign extend packed 16-bit integers in "a" to packed 64-bit integers, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := 64*j
+ k := 16*j
+ dst[i+63:i] := SignExtend64(a[k+15:k])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMOVSXWQ" form="zmm, xmm" xed="VPMOVSXWQ_ZMMi64_MASKmskw_XMMi16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cvtepi16_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m512i" varname="dst" etype="SI64"/>
+ <parameter type="__m512i" varname="src" etype="SI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI16"/>
+ <description>Sign extend packed 16-bit integers in "a" to packed 64-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := 64*j
+ l := 16*j
+ IF k[j]
+ dst[i+63:i] := SignExtend64(a[l+15:l])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMOVSXWQ" form="zmm {k}, xmm" xed="VPMOVSXWQ_ZMMi64_MASKmskw_XMMi16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_cvtepi16_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m512i" varname="dst" etype="SI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI16"/>
+ <description>Sign extend packed 16-bit integers in "a" to packed 64-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := 64*j
+ l := 16*j
+ IF k[j]
+ dst[i+63:i] := SignExtend64(a[l+15:l])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMOVSXWQ" form="zmm {z}, xmm" xed="VPMOVSXWQ_ZMMi64_MASKmskw_XMMi16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_cvtusepi32_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <description>Convert packed unsigned 32-bit integers in "a" to packed unsigned 8-bit integers with unsigned saturation, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := 32*j
+ k := 8*j
+ dst[k+7:k] := SaturateU8(a[i+31:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMOVUSDB" form="xmm, zmm" xed="VPMOVUSDB_XMMu8_MASKmskw_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cvtusepi32_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__m128i" varname="src" etype="UI8"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <description>Convert packed unsigned 32-bit integers in "a" to packed unsigned 8-bit integers with unsigned saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := 32*j
+ l := 8*j
+ IF k[j]
+ dst[l+7:l] := SaturateU8(a[i+31:i])
+ ELSE
+ dst[l+7:l] := src[l+7:l]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMOVUSDB" form="xmm {k}, zmm" xed="VPMOVUSDB_XMMu8_MASKmskw_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cvtusepi32_storeu_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="base_addr" etype="UI8" memwidth="128"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <description>Convert packed unsigned 32-bit integers in "a" to packed 8-bit integers with unsigned saturation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr".</description>
+ <operation>
+FOR j := 0 to 15
+ i := 32*j
+ l := 8*j
+ IF k[j]
+ MEM[base_addr+l+7:base_addr+l] := SaturateU8(a[i+31:i])
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VPMOVUSDB" form="m128 {k}, zmm" xed="VPMOVUSDB_MEMu8_MASKmskw_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_cvtusepi32_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <description>Convert packed unsigned 32-bit integers in "a" to packed unsigned 8-bit integers with unsigned saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := 32*j
+ l := 8*j
+ IF k[j]
+ dst[l+7:l] := SaturateU8(a[i+31:i])
+ ELSE
+ dst[l+7:l] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMOVUSDB" form="xmm {z}, zmm" xed="VPMOVUSDB_XMMu8_MASKmskw_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_cvtusepi32_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <description>Convert packed unsigned 32-bit integers in "a" to packed unsigned 16-bit integers with unsigned saturation, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := 32*j
+ k := 16*j
+ dst[k+15:k] := SaturateU16(a[i+31:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMOVUSDW" form="ymm, zmm" xed="VPMOVUSDW_YMMu16_MASKmskw_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cvtusepi32_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__m256i" varname="src" etype="UI16"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <description>Convert packed unsigned 32-bit integers in "a" to packed unsigned 16-bit integers with unsigned saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := 32*j
+ l := 16*j
+ IF k[j]
+ dst[l+15:l] := SaturateU16(a[i+31:i])
+ ELSE
+ dst[l+15:l] := src[l+15:l]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMOVUSDW" form="ymm {k}, zmm" xed="VPMOVUSDW_YMMu16_MASKmskw_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cvtusepi32_storeu_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="base_addr" etype="UI16" memwidth="256"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <description>Convert packed unsigned 32-bit integers in "a" to packed 16-bit integers with unsigned saturation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr".</description>
+ <operation>
+FOR j := 0 to 15
+ i := 32*j
+ l := 16*j
+ IF k[j]
+ MEM[base_addr+l+15:base_addr+l] := SaturateU16(a[i+31:i])
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VPMOVUSDW" form="m256 {k}, zmm" xed="VPMOVUSDW_MEMu16_MASKmskw_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_cvtusepi32_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <description>Convert packed unsigned 32-bit integers in "a" to packed unsigned 16-bit integers with unsigned saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := 32*j
+ l := 16*j
+ IF k[j]
+ dst[l+15:l] := SaturateU16(a[i+31:i])
+ ELSE
+ dst[l+15:l] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMOVUSDW" form="ymm {z}, zmm" xed="VPMOVUSDW_YMMu16_MASKmskw_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_cvtusepi64_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <description>Convert packed unsigned 64-bit integers in "a" to packed unsigned 8-bit integers with unsigned saturation, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := 64*j
+ k := 8*j
+ dst[k+7:k] := SaturateU8(a[i+63:i])
+ENDFOR
+dst[MAX:64] := 0
+ </operation>
+ <instruction name="VPMOVUSQB" form="xmm, zmm" xed="VPMOVUSQB_XMMu8_MASKmskw_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cvtusepi64_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__m128i" varname="src" etype="UI8"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <description>Convert packed unsigned 64-bit integers in "a" to packed unsigned 8-bit integers with unsigned saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := 64*j
+ l := 8*j
+ IF k[j]
+ dst[l+7:l] := SaturateU8(a[i+63:i])
+ ELSE
+ dst[l+7:l] := src[l+7:l]
+ FI
+ENDFOR
+dst[MAX:64] := 0
+ </operation>
+ <instruction name="VPMOVUSQB" form="xmm {k}, zmm" xed="VPMOVUSQB_XMMu8_MASKmskw_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cvtusepi64_storeu_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="base_addr" etype="UI8" memwidth="64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <description>Convert packed unsigned 64-bit integers in "a" to packed 8-bit integers with unsigned saturation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr".</description>
+ <operation>
+FOR j := 0 to 7
+ i := 64*j
+ l := 8*j
+ IF k[j]
+ MEM[base_addr+l+7:base_addr+l] := SaturateU8(a[i+63:i])
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VPMOVUSQB" form="m64 {k}, zmm" xed="VPMOVUSQB_MEMu8_MASKmskw_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_cvtusepi64_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <description>Convert packed unsigned 64-bit integers in "a" to packed unsigned 8-bit integers with unsigned saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := 64*j
+ l := 8*j
+ IF k[j]
+ dst[l+7:l] := SaturateU8(a[i+63:i])
+ ELSE
+ dst[l+7:l] := 0
+ FI
+ENDFOR
+dst[MAX:64] := 0
+ </operation>
+ <instruction name="VPMOVUSQB" form="xmm {z}, zmm" xed="VPMOVUSQB_XMMu8_MASKmskw_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_cvtusepi64_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <description>Convert packed unsigned 64-bit integers in "a" to packed unsigned 32-bit integers with unsigned saturation, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := 64*j
+ k := 32*j
+ dst[k+31:k] := SaturateU32(a[i+63:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMOVUSQD" form="ymm, zmm" xed="VPMOVUSQD_YMMu32_MASKmskw_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cvtusepi64_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <description>Convert packed unsigned 64-bit integers in "a" to packed unsigned 32-bit integers with unsigned saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := 64*j
+ l := 32*j
+ IF k[j]
+ dst[l+31:l] := SaturateU32(a[i+63:i])
+ ELSE
+ dst[l+31:l] := src[l+31:l]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMOVUSQD" form="ymm {k}, zmm" xed="VPMOVUSQD_YMMu32_MASKmskw_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cvtusepi64_storeu_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="base_addr" etype="UI32" memwidth="256"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <description>Convert packed unsigned 64-bit integers in "a" to packed 32-bit integers with unsigned saturation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr".</description>
+ <operation>
+FOR j := 0 to 7
+ i := 64*j
+ l := 32*j
+ IF k[j]
+ MEM[base_addr+l+31:base_addr+l] := SaturateU32(a[i+63:i])
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VPMOVUSQD" form="m256 {k}, zmm" xed="VPMOVUSQD_MEMu32_MASKmskw_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_cvtusepi64_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <description>Convert packed unsigned 64-bit integers in "a" to packed unsigned 32-bit integers with unsigned saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := 64*j
+ l := 32*j
+ IF k[j]
+ dst[l+31:l] := SaturateU32(a[i+63:i])
+ ELSE
+ dst[l+31:l] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMOVUSQD" form="ymm {z}, zmm" xed="VPMOVUSQD_YMMu32_MASKmskw_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_cvtusepi64_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <description>Convert packed unsigned 64-bit integers in "a" to packed unsigned 16-bit integers with unsigned saturation, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := 64*j
+ k := 16*j
+ dst[k+15:k] := SaturateU16(a[i+63:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMOVUSQW" form="xmm, zmm" xed="VPMOVUSQW_XMMu16_MASKmskw_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cvtusepi64_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__m128i" varname="src" etype="UI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <description>Convert packed unsigned 64-bit integers in "a" to packed unsigned 16-bit integers with unsigned saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := 64*j
+ l := 16*j
+ IF k[j]
+ dst[l+15:l] := SaturateU16(a[i+63:i])
+ ELSE
+ dst[l+15:l] := src[l+15:l]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMOVUSQW" form="xmm {k}, zmm" xed="VPMOVUSQW_XMMu16_MASKmskw_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cvtusepi64_storeu_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="base_addr" etype="UI16" memwidth="128"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <description>Convert packed unsigned 64-bit integers in "a" to packed 16-bit integers with unsigned saturation, and store the active results (those with their respective bit set in writemask "k") to unaligned memory at "base_addr".</description>
+ <operation>
+FOR j := 0 to 7
+ i := 64*j
+ l := 16*j
+ IF k[j]
+ MEM[base_addr+l+15:base_addr+l] := SaturateU16(a[i+63:i])
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VPMOVUSQW" form="m128 {k}, zmm" xed="VPMOVUSQW_MEMu16_MASKmskw_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_cvtusepi64_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <description>Convert packed unsigned 64-bit integers in "a" to packed unsigned 16-bit integers with unsigned saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := 64*j
+ l := 16*j
+ IF k[j]
+ dst[l+15:l] := SaturateU16(a[i+63:i])
+ ELSE
+ dst[l+15:l] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMOVUSQW" form="xmm {z}, zmm" xed="VPMOVUSQW_XMMu16_MASKmskw_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_cvtepu8_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
+ <description>Zero extend packed unsigned 8-bit integers in "a" to packed 32-bit integers, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := 32*j
+ k := 8*j
+ dst[i+31:i] := ZeroExtend32(a[k+7:k])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMOVZXBD" form="zmm, xmm" xed="VPMOVZXBD_ZMMi32_MASKmskw_XMMi8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cvtepu8_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="src" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
+ <description>Zero extend packed unsigned 8-bit integers in "a" to packed 32-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := 32*j
+ l := 8*j
+ IF k[j]
+ dst[i+31:i] := ZeroExtend32(a[l+7:l])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMOVZXBD" form="zmm {k}, xmm" xed="VPMOVZXBD_ZMMi32_MASKmskw_XMMi8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_cvtepu8_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
+ <description>Zero extend packed unsigned 8-bit integers in "a" to packed 32-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := 32*j
+ l := 8*j
+ IF k[j]
+ dst[i+31:i] := ZeroExtend32(a[l+7:l])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMOVZXBD" form="zmm {z}, xmm" xed="VPMOVZXBD_ZMMi32_MASKmskw_XMMi8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_cvtepu8_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
  <description>Zero extend packed unsigned 8-bit integers in the low 8 bytes of "a" to packed 64-bit integers, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := 64*j
+ k := 8*j
+ dst[i+63:i] := ZeroExtend64(a[k+7:k])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMOVZXBQ" form="zmm, xmm" xed="VPMOVZXBQ_ZMMi64_MASKmskw_XMMi8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cvtepu8_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
+ <description>Zero extend packed unsigned 8-bit integers in the low 8 bytes of "a" to packed 64-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := 64*j
+ l := 8*j
+ IF k[j]
+ dst[i+63:i] := ZeroExtend64(a[l+7:l])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMOVZXBQ" form="zmm {k}, xmm" xed="VPMOVZXBQ_ZMMi64_MASKmskw_XMMi8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_cvtepu8_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
+ <description>Zero extend packed unsigned 8-bit integers in the low 8 bytes of "a" to packed 64-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := 64*j
+ l := 8*j
+ IF k[j]
+ dst[i+63:i] := ZeroExtend64(a[l+7:l])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMOVZXBQ" form="zmm {z}, xmm" xed="VPMOVZXBQ_ZMMi64_MASKmskw_XMMi8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_cvtepu32_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <description>Zero extend packed unsigned 32-bit integers in "a" to packed 64-bit integers, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := 64*j
+ k := 32*j
+ dst[i+63:i] := ZeroExtend64(a[k+31:k])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMOVZXDQ" form="zmm, ymm" xed="VPMOVZXDQ_ZMMi64_MASKmskw_YMMi32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cvtepu32_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <description>Zero extend packed unsigned 32-bit integers in "a" to packed 64-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := 64*j
+ l := 32*j
+ IF k[j]
+ dst[i+63:i] := ZeroExtend64(a[l+31:l])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMOVZXDQ" form="zmm {k}, ymm" xed="VPMOVZXDQ_ZMMi64_MASKmskw_YMMi32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_cvtepu32_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <description>Zero extend packed unsigned 32-bit integers in "a" to packed 64-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := 64*j
+ l := 32*j
+ IF k[j]
+ dst[i+63:i] := ZeroExtend64(a[l+31:l])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMOVZXDQ" form="zmm {z}, ymm" xed="VPMOVZXDQ_ZMMi64_MASKmskw_YMMi32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_cvtepu16_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="a" etype="UI16"/>
+ <description>Zero extend packed unsigned 16-bit integers in "a" to packed 32-bit integers, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := 32*j
+ k := 16*j
+ dst[i+31:i] := ZeroExtend32(a[k+15:k])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMOVZXWD" form="zmm, ymm" xed="VPMOVZXWD_ZMMi32_MASKmskw_YMMi16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cvtepu16_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="src" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI16"/>
+ <description>Zero extend packed unsigned 16-bit integers in "a" to packed 32-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := 32*j
+ l := 16*j
+ IF k[j]
+ dst[i+31:i] := ZeroExtend32(a[l+15:l])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMOVZXWD" form="zmm {k}, ymm" xed="VPMOVZXWD_ZMMi32_MASKmskw_YMMi16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_cvtepu16_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI16"/>
+ <description>Zero extend packed unsigned 16-bit integers in "a" to packed 32-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := 32*j
+ l := 16*j
+ IF k[j]
+ dst[i+31:i] := ZeroExtend32(a[l+15:l])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMOVZXWD" form="zmm {z}, ymm" xed="VPMOVZXWD_ZMMi32_MASKmskw_YMMi16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_cvtepu16_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <description>Zero extend packed unsigned 16-bit integers in "a" to packed 64-bit integers, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := 64*j
+ k := 16*j
+ dst[i+63:i] := ZeroExtend64(a[k+15:k])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMOVZXWQ" form="zmm, xmm" xed="VPMOVZXWQ_ZMMi64_MASKmskw_XMMi16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cvtepu16_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <description>Zero extend packed unsigned 16-bit integers in "a" to packed 64-bit integers, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := 64*j
+ l := 16*j
+ IF k[j]
+ dst[i+63:i] := ZeroExtend64(a[l+15:l])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMOVZXWQ" form="zmm {k}, xmm" xed="VPMOVZXWQ_ZMMi64_MASKmskw_XMMi16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_cvtepu16_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <description>Zero extend packed unsigned 16-bit integers in "a" to packed 64-bit integers, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := 64*j
+ l := 16*j
+ IF k[j]
+ dst[i+63:i] := ZeroExtend64(a[l+15:l])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMOVZXWQ" form="zmm {z}, xmm" xed="VPMOVZXWQ_ZMMi64_MASKmskw_XMMi16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_mul_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="SI64"/>
+ <parameter type="__m512i" varname="src" etype="SI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI32"/>
+ <parameter type="__m512i" varname="b" etype="SI32"/>
+ <description>Multiply the low signed 32-bit integers from each packed 64-bit element in "a" and "b", and store the signed 64-bit results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := SignExtend64(a[i+31:i]) * SignExtend64(b[i+31:i])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMULDQ" form="zmm {k}, zmm, zmm" xed="VPMULDQ_ZMMi64_MASKmskw_ZMMi32_ZMMi32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_mul_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="SI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI32"/>
+ <parameter type="__m512i" varname="b" etype="SI32"/>
+ <description>Multiply the low signed 32-bit integers from each packed 64-bit element in "a" and "b", and store the signed 64-bit results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := SignExtend64(a[i+31:i]) * SignExtend64(b[i+31:i])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMULDQ" form="zmm {z}, zmm, zmm" xed="VPMULDQ_ZMMi64_MASKmskw_ZMMi32_ZMMi32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mul_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="SI64"/>
+ <parameter type="__m512i" varname="a" etype="SI32"/>
+ <parameter type="__m512i" varname="b" etype="SI32"/>
+ <description>Multiply the low signed 32-bit integers from each packed 64-bit element in "a" and "b", and store the signed 64-bit results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ dst[i+63:i] := SignExtend64(a[i+31:i]) * SignExtend64(b[i+31:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMULDQ" form="zmm, zmm, zmm" xed="VPMULDQ_ZMMi64_MASKmskw_ZMMi32_ZMMi32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_mul_epu32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="__m512i" varname="b" etype="UI32"/>
+ <description>Multiply the low unsigned 32-bit integers from each packed 64-bit element in "a" and "b", and store the unsigned 64-bit results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := a[i+31:i] * b[i+31:i]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMULUDQ" form="zmm {k}, zmm, zmm" xed="VPMULUDQ_ZMMu64_MASKmskw_ZMMu32_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_mul_epu32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="__m512i" varname="b" etype="UI32"/>
+ <description>Multiply the low unsigned 32-bit integers from each packed 64-bit element in "a" and "b", and store the unsigned 64-bit results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := a[i+31:i] * b[i+31:i]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMULUDQ" form="zmm {z}, zmm, zmm" xed="VPMULUDQ_ZMMu64_MASKmskw_ZMMu32_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mul_epu32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="__m512i" varname="b" etype="UI32"/>
+ <description>Multiply the low unsigned 32-bit integers from each packed 64-bit element in "a" and "b", and store the unsigned 64-bit results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ dst[i+63:i] := a[i+31:i] * b[i+31:i]
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMULUDQ" form="zmm, zmm, zmm" xed="VPMULUDQ_ZMMu64_MASKmskw_ZMMu32_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_or_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Logical</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="__m512i" varname="b" etype="UI32"/>
+ <description>Compute the bitwise OR of packed 32-bit integers in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := a[i+31:i] OR b[i+31:i]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPORD" form="zmm {z}, zmm, zmm" xed="VPORD_ZMMu32_MASKmskw_ZMMu32_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_or_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Logical</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="__m512i" varname="b" etype="UI64"/>
+ <description>Compute the bitwise OR of packed 64-bit integers in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := a[i+63:i] OR b[i+63:i]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPORQ" form="zmm {z}, zmm, zmm" xed="VPORQ_ZMMu64_MASKmskw_ZMMu64_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_rol_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="src" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Rotate the bits in each packed 32-bit integer in "a" to the left by the number of bits specified in "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE LEFT_ROTATE_DWORDS(src, count_src) {
+ count := count_src % 32
+ RETURN (src &lt;&lt; count) OR (src &gt;&gt; (32 - count))
+}
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := LEFT_ROTATE_DWORDS(a[i+31:i], imm8[7:0])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPROLD" form="zmm {k}, zmm, imm8" xed="VPROLD_ZMMu32_MASKmskw_ZMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_rol_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Rotate the bits in each packed 32-bit integer in "a" to the left by the number of bits specified in "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE LEFT_ROTATE_DWORDS(src, count_src) {
+ count := count_src % 32
+ RETURN (src &lt;&lt; count) OR (src &gt;&gt; (32 - count))
+}
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := LEFT_ROTATE_DWORDS(a[i+31:i], imm8[7:0])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPROLD" form="zmm {z}, zmm, imm8" xed="VPROLD_ZMMu32_MASKmskw_ZMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_rol_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Rotate the bits in each packed 32-bit integer in "a" to the left by the number of bits specified in "imm8", and store the results in "dst".</description>
+ <operation>
+DEFINE LEFT_ROTATE_DWORDS(src, count_src) {
+ count := count_src % 32
+ RETURN (src &lt;&lt; count) OR (src &gt;&gt; (32 - count))
+}
+FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := LEFT_ROTATE_DWORDS(a[i+31:i], imm8[7:0])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPROLD" form="zmm, zmm, imm8" xed="VPROLD_ZMMu32_MASKmskw_ZMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_rol_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Rotate the bits in each packed 64-bit integer in "a" to the left by the number of bits specified in "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE LEFT_ROTATE_QWORDS(src, count_src) {
+ count := count_src % 64
+ RETURN (src &lt;&lt; count) OR (src &gt;&gt; (64 - count))
+}
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := LEFT_ROTATE_QWORDS(a[i+63:i], imm8[7:0])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPROLQ" form="zmm {k}, zmm, imm8" xed="VPROLQ_ZMMu64_MASKmskw_ZMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_rol_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Rotate the bits in each packed 64-bit integer in "a" to the left by the number of bits specified in "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE LEFT_ROTATE_QWORDS(src, count_src) {
+ count := count_src % 64
+ RETURN (src &lt;&lt; count) OR (src &gt;&gt; (64 - count))
+}
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := LEFT_ROTATE_QWORDS(a[i+63:i], imm8[7:0])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPROLQ" form="zmm {z}, zmm, imm8" xed="VPROLQ_ZMMu64_MASKmskw_ZMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_rol_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Rotate the bits in each packed 64-bit integer in "a" to the left by the number of bits specified in "imm8", and store the results in "dst".</description>
+ <operation>
+DEFINE LEFT_ROTATE_QWORDS(src, count_src) {
+ count := count_src % 64
+ RETURN (src &lt;&lt; count) OR (src &gt;&gt; (64 - count))
+}
+FOR j := 0 to 7
+ i := j*64
+ dst[i+63:i] := LEFT_ROTATE_QWORDS(a[i+63:i], imm8[7:0])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPROLQ" form="zmm, zmm, imm8" xed="VPROLQ_ZMMu64_MASKmskw_ZMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_rolv_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="src" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="__m512i" varname="b" etype="UI32"/>
+ <description>Rotate the bits in each packed 32-bit integer in "a" to the left by the number of bits specified in the corresponding element of "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE LEFT_ROTATE_DWORDS(src, count_src) {
+ count := count_src % 32
+ RETURN (src &lt;&lt; count) OR (src &gt;&gt; (32 - count))
+}
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := LEFT_ROTATE_DWORDS(a[i+31:i], b[i+31:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPROLVD" form="zmm {k}, zmm, zmm" xed="VPROLVD_ZMMu32_MASKmskw_ZMMu32_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_rolv_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="__m512i" varname="b" etype="UI32"/>
+ <description>Rotate the bits in each packed 32-bit integer in "a" to the left by the number of bits specified in the corresponding element of "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE LEFT_ROTATE_DWORDS(src, count_src) {
+ count := count_src % 32
+ RETURN (src &lt;&lt; count) OR (src &gt;&gt; (32 - count))
+}
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := LEFT_ROTATE_DWORDS(a[i+31:i], b[i+31:i])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPROLVD" form="zmm {z}, zmm, zmm" xed="VPROLVD_ZMMu32_MASKmskw_ZMMu32_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_rolv_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="__m512i" varname="b" etype="UI32"/>
+ <description>Rotate the bits in each packed 32-bit integer in "a" to the left by the number of bits specified in the corresponding element of "b", and store the results in "dst".</description>
+ <operation>
+DEFINE LEFT_ROTATE_DWORDS(src, count_src) {
+ count := count_src % 32
+ RETURN (src &lt;&lt; count) OR (src &gt;&gt; (32 - count))
+}
+FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := LEFT_ROTATE_DWORDS(a[i+31:i], b[i+31:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPROLVD" form="zmm, zmm, zmm" xed="VPROLVD_ZMMu32_MASKmskw_ZMMu32_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_rolv_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="__m512i" varname="b" etype="UI64"/>
+ <description>Rotate the bits in each packed 64-bit integer in "a" to the left by the number of bits specified in the corresponding element of "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE LEFT_ROTATE_QWORDS(src, count_src) {
+ count := count_src % 64
+ RETURN (src &lt;&lt; count) OR (src &gt;&gt; (64 - count))
+}
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := LEFT_ROTATE_QWORDS(a[i+63:i], b[i+63:i])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPROLVQ" form="zmm {k}, zmm, zmm" xed="VPROLVQ_ZMMu64_MASKmskw_ZMMu64_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_rolv_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="__m512i" varname="b" etype="UI64"/>
+ <description>Rotate the bits in each packed 64-bit integer in "a" to the left by the number of bits specified in the corresponding element of "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE LEFT_ROTATE_QWORDS(src, count_src) {
+ count := count_src % 64
+ RETURN (src &lt;&lt; count) OR (src &gt;&gt; (64 - count))
+}
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := LEFT_ROTATE_QWORDS(a[i+63:i], b[i+63:i])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPROLVQ" form="zmm {z}, zmm, zmm" xed="VPROLVQ_ZMMu64_MASKmskw_ZMMu64_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_rolv_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="__m512i" varname="b" etype="UI64"/>
+ <description>Rotate the bits in each packed 64-bit integer in "a" to the left by the number of bits specified in the corresponding element of "b", and store the results in "dst".</description>
+ <operation>
+DEFINE LEFT_ROTATE_QWORDS(src, count_src) {
+ count := count_src % 64
+ RETURN (src &lt;&lt; count) OR (src &gt;&gt; (64 - count))
+}
+FOR j := 0 to 7
+ i := j*64
+ dst[i+63:i] := LEFT_ROTATE_QWORDS(a[i+63:i], b[i+63:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPROLVQ" form="zmm, zmm, zmm" xed="VPROLVQ_ZMMu64_MASKmskw_ZMMu64_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_ror_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="src" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Rotate the bits in each packed 32-bit integer in "a" to the right by the number of bits specified in "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE RIGHT_ROTATE_DWORDS(src, count_src) {
+ count := count_src % 32
+ RETURN (src &gt;&gt;count) OR (src &lt;&lt; (32 - count))
+}
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := RIGHT_ROTATE_DWORDS(a[i+31:i], imm8[7:0])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPRORD" form="zmm {k}, zmm, imm8" xed="VPRORD_ZMMu32_MASKmskw_ZMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_ror_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Rotate the bits in each packed 32-bit integer in "a" to the right by the number of bits specified in "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE RIGHT_ROTATE_DWORDS(src, count_src) {
+ count := count_src % 32
+ RETURN (src &gt;&gt;count) OR (src &lt;&lt; (32 - count))
+}
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := RIGHT_ROTATE_DWORDS(a[i+31:i], imm8[7:0])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPRORD" form="zmm {z}, zmm, imm8" xed="VPRORD_ZMMu32_MASKmskw_ZMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_ror_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Rotate the bits in each packed 32-bit integer in "a" to the right by the number of bits specified in "imm8", and store the results in "dst".</description>
+ <operation>
+DEFINE RIGHT_ROTATE_DWORDS(src, count_src) {
+ count := count_src % 32
+ RETURN (src &gt;&gt;count) OR (src &lt;&lt; (32 - count))
+}
+FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := RIGHT_ROTATE_DWORDS(a[i+31:i], imm8[7:0])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPRORD" form="zmm, zmm, imm8" xed="VPRORD_ZMMu32_MASKmskw_ZMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_ror_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Rotate the bits in each packed 64-bit integer in "a" to the right by the number of bits specified in "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE RIGHT_ROTATE_QWORDS(src, count_src) {
+ count := count_src % 64
+ RETURN (src &gt;&gt; count) OR (src &lt;&lt; (64 - count))
+}
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := RIGHT_ROTATE_QWORDS(a[i+63:i], imm8[7:0])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPRORQ" form="zmm {k}, zmm, imm8" xed="VPRORQ_ZMMu64_MASKmskw_ZMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_ror_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Rotate the bits in each packed 64-bit integer in "a" to the right by the number of bits specified in "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE RIGHT_ROTATE_QWORDS(src, count_src) {
+ count := count_src % 64
+ RETURN (src &gt;&gt; count) OR (src &lt;&lt; (64 - count))
+}
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := RIGHT_ROTATE_QWORDS(a[i+63:i], imm8[7:0])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPRORQ" form="zmm {z}, zmm, imm8" xed="VPRORQ_ZMMu64_MASKmskw_ZMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_ror_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Rotate the bits in each packed 64-bit integer in "a" to the right by the number of bits specified in "imm8", and store the results in "dst".</description>
+ <operation>
+DEFINE RIGHT_ROTATE_QWORDS(src, count_src) {
+ count := count_src % 64
+ RETURN (src &gt;&gt; count) OR (src &lt;&lt; (64 - count))
+}
+FOR j := 0 to 7
+ i := j*64
+ dst[i+63:i] := RIGHT_ROTATE_QWORDS(a[i+63:i], imm8[7:0])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPRORQ" form="zmm, zmm, imm8" xed="VPRORQ_ZMMu64_MASKmskw_ZMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_rorv_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="src" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="__m512i" varname="b" etype="UI32"/>
+ <description>Rotate the bits in each packed 32-bit integer in "a" to the right by the number of bits specified in the corresponding element of "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE RIGHT_ROTATE_DWORDS(src, count_src) {
+ count := count_src % 32
+ RETURN (src &gt;&gt;count) OR (src &lt;&lt; (32 - count))
+}
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := RIGHT_ROTATE_DWORDS(a[i+31:i], b[i+31:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPRORVD" form="zmm {k}, zmm, zmm" xed="VPRORVD_ZMMu32_MASKmskw_ZMMu32_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_rorv_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="__m512i" varname="b" etype="UI32"/>
+ <description>Rotate the bits in each packed 32-bit integer in "a" to the right by the number of bits specified in the corresponding element of "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE RIGHT_ROTATE_DWORDS(src, count_src) {
+ count := count_src % 32
+ RETURN (src &gt;&gt;count) OR (src &lt;&lt; (32 - count))
+}
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := RIGHT_ROTATE_DWORDS(a[i+31:i], b[i+31:i])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPRORVD" form="zmm {z}, zmm, zmm" xed="VPRORVD_ZMMu32_MASKmskw_ZMMu32_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_rorv_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="__m512i" varname="b" etype="UI32"/>
+ <description>Rotate the bits in each packed 32-bit integer in "a" to the right by the number of bits specified in the corresponding element of "b", and store the results in "dst".</description>
+ <operation>
+DEFINE RIGHT_ROTATE_DWORDS(src, count_src) {
+ count := count_src % 32
+ RETURN (src &gt;&gt;count) OR (src &lt;&lt; (32 - count))
+}
+FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := RIGHT_ROTATE_DWORDS(a[i+31:i], b[i+31:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPRORVD" form="zmm, zmm, zmm" xed="VPRORVD_ZMMu32_MASKmskw_ZMMu32_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_rorv_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="__m512i" varname="b" etype="UI64"/>
+ <description>Rotate the bits in each packed 64-bit integer in "a" to the right by the number of bits specified in the corresponding element of "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE RIGHT_ROTATE_QWORDS(src, count_src) {
+ count := count_src % 64
+ RETURN (src &gt;&gt; count) OR (src &lt;&lt; (64 - count))
+}
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := RIGHT_ROTATE_QWORDS(a[i+63:i], b[i+63:i])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPRORVQ" form="zmm {k}, zmm, zmm" xed="VPRORVQ_ZMMu64_MASKmskw_ZMMu64_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_rorv_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="__m512i" varname="b" etype="UI64"/>
+ <description>Rotate the bits in each packed 64-bit integer in "a" to the right by the number of bits specified in the corresponding element of "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE RIGHT_ROTATE_QWORDS(src, count_src) {
+ count := count_src % 64
+ RETURN (src &gt;&gt; count) OR (src &lt;&lt; (64 - count))
+}
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := RIGHT_ROTATE_QWORDS(a[i+63:i], b[i+63:i])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPRORVQ" form="zmm {z}, zmm, zmm" xed="VPRORVQ_ZMMu64_MASKmskw_ZMMu64_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_rorv_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="__m512i" varname="b" etype="UI64"/>
+ <description>Rotate the bits in each packed 64-bit integer in "a" to the right by the number of bits specified in the corresponding element of "b", and store the results in "dst".</description>
+ <operation>
+DEFINE RIGHT_ROTATE_QWORDS(src, count_src) {
+ count := count_src % 64
+ RETURN (src &gt;&gt; count) OR (src &lt;&lt; (64 - count))
+}
+FOR j := 0 to 7
+ i := j*64
+ dst[i+63:i] := RIGHT_ROTATE_QWORDS(a[i+63:i], b[i+63:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPRORVQ" form="zmm, zmm, zmm" xed="VPRORVQ_ZMMu64_MASKmskw_ZMMu64_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_i32scatter_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="base_addr" etype="UI64"/>
+ <parameter type="__m256i" varname="vindex" etype="SI32"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <description>Scatter 64-bit integers from "a" into memory using 32-bit indices. 64-bit elements are stored at addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). "scale" should be 1, 2, 4 or 8.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ m := j*32
+ addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
+ MEM[addr+63:addr] := a[i+63:i]
+ENDFOR
+ </operation>
+ <instruction name="VPSCATTERDQ" form="vm32y, zmm" xed="VPSCATTERDQ_MEMu64_MASKmskw_ZMMu64_AVX512_VL512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_i32scatter_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="base_addr" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="vindex" etype="SI32"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <description>Scatter 64-bit integers from "a" into memory using 32-bit indices. 64-bit elements are stored at addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale") subject to mask "k" (elements are not stored when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ m := j*32
+ IF k[j]
+ addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
+ MEM[addr+63:addr] := a[i+63:i]
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VPSCATTERDQ" form="vm32y {k}, zmm" xed="VPSCATTERDQ_MEMu64_MASKmskw_ZMMu64_AVX512_VL512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_i64scatter_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="base_addr" etype="UI32"/>
+ <parameter type="__m512i" varname="vindex" etype="SI64"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <description>Scatter 32-bit integers from "a" into memory using 64-bit indices. 32-bit elements are stored at addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). "scale" should be 1, 2, 4 or 8.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ m := j*64
+ addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8
+ MEM[addr+31:addr] := a[i+31:i]
+ENDFOR
+ </operation>
+ <instruction name="VPSCATTERQD" form="vm64z, ymm" xed="VPSCATTERQD_MEMu32_MASKmskw_YMMu32_AVX512_VL512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_i64scatter_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="base_addr" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="vindex" etype="SI64"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <description>Scatter 32-bit integers from "a" into memory using 64-bit indices. 32-bit elements are stored at addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale") subject to mask "k" (elements are not stored when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ m := j*64
+ IF k[j]
+ addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8
+ MEM[addr+31:addr] := a[i+31:i]
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VPSCATTERQD" form="vm64z {k}, ymm" xed="VPSCATTERQD_MEMu32_MASKmskw_YMMu32_AVX512_VL512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_i64scatter_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="base_addr" etype="UI64"/>
+ <parameter type="__m512i" varname="vindex" etype="SI64"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <description>Scatter 64-bit integers from "a" into memory using 64-bit indices. 64-bit elements are stored at addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). "scale" should be 1, 2, 4 or 8.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ m := j*64
+ addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8
+ MEM[addr+63:addr] := a[i+63:i]
+ENDFOR
+ </operation>
+ <instruction name="VPSCATTERQQ" form="vm64z, zmm" xed="VPSCATTERQQ_MEMu64_MASKmskw_ZMMu64_AVX512_VL512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_i64scatter_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="base_addr" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="vindex" etype="SI64"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <description>Scatter 64-bit integers from "a" into memory using 64-bit indices. 64-bit elements are stored at addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale") subject to mask "k" (elements are not stored when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ m := j*64
+ IF k[j]
+ addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8
+ MEM[addr+63:addr] := a[i+63:i]
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VPSCATTERQQ" form="vm64z {k}, zmm" xed="VPSCATTERQQ_MEMu64_MASKmskw_ZMMu64_AVX512_VL512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_shuffle_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="_MM_PERM_ENUM" varname="imm8" etype="IMM" immtype="_MM_PERM"/>
+ <description>Shuffle 32-bit integers in "a" within 128-bit lanes using the control in "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE SELECT4(src, control) {
+ CASE(control[1:0]) OF
+ 0: tmp[31:0] := src[31:0]
+ 1: tmp[31:0] := src[63:32]
+ 2: tmp[31:0] := src[95:64]
+ 3: tmp[31:0] := src[127:96]
+ ESAC
+ RETURN tmp[31:0]
+}
+tmp_dst[31:0] := SELECT4(a[127:0], imm8[1:0])
+tmp_dst[63:32] := SELECT4(a[127:0], imm8[3:2])
+tmp_dst[95:64] := SELECT4(a[127:0], imm8[5:4])
+tmp_dst[127:96] := SELECT4(a[127:0], imm8[7:6])
+tmp_dst[159:128] := SELECT4(a[255:128], imm8[1:0])
+tmp_dst[191:160] := SELECT4(a[255:128], imm8[3:2])
+tmp_dst[223:192] := SELECT4(a[255:128], imm8[5:4])
+tmp_dst[255:224] := SELECT4(a[255:128], imm8[7:6])
+tmp_dst[287:256] := SELECT4(a[383:256], imm8[1:0])
+tmp_dst[319:288] := SELECT4(a[383:256], imm8[3:2])
+tmp_dst[351:320] := SELECT4(a[383:256], imm8[5:4])
+tmp_dst[383:352] := SELECT4(a[383:256], imm8[7:6])
+tmp_dst[415:384] := SELECT4(a[511:384], imm8[1:0])
+tmp_dst[447:416] := SELECT4(a[511:384], imm8[3:2])
+tmp_dst[479:448] := SELECT4(a[511:384], imm8[5:4])
+tmp_dst[511:480] := SELECT4(a[511:384], imm8[7:6])
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := tmp_dst[i+31:i]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSHUFD" form="zmm {z}, zmm, imm8" xed="VPSHUFD_ZMMu32_MASKmskw_ZMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_sll_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="src" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="count" etype="UI32"/>
+ <description>Shift packed 32-bit integers in "a" left by "count" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ IF count[63:0] &gt; 31
+ dst[i+31:i] := 0
+ ELSE
+ dst[i+31:i] := ZeroExtend32(a[i+31:i] &lt;&lt; count[63:0])
+ FI
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSLLD" form="zmm {k}, zmm, xmm" xed="VPSLLD_ZMMu32_MASKmskw_ZMMu32_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_sll_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="count" etype="UI32"/>
+ <description>Shift packed 32-bit integers in "a" left by "count" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ IF count[63:0] &gt; 31
+ dst[i+31:i] := 0
+ ELSE
+ dst[i+31:i] := ZeroExtend32(a[i+31:i] &lt;&lt; count[63:0])
+ FI
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSLLD" form="zmm {z}, zmm, xmm" xed="VPSLLD_ZMMu32_MASKmskw_ZMMu32_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_slli_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="unsigned int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shift packed 32-bit integers in "a" left by "imm8" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ IF imm8[7:0] &gt; 31
+ dst[i+31:i] := 0
+ ELSE
+ dst[i+31:i] := ZeroExtend32(a[i+31:i] &lt;&lt; imm8[7:0])
+ FI
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSLLD" form="zmm {z}, zmm, imm8" xed="VPSLLD_ZMMu32_MASKmskw_ZMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_sll_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="count" etype="UI32"/>
+ <description>Shift packed 32-bit integers in "a" left by "count" while shifting in zeros, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF count[63:0] &gt; 31
+ dst[i+31:i] := 0
+ ELSE
+ dst[i+31:i] := ZeroExtend32(a[i+31:i] &lt;&lt; count[63:0])
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSLLD" form="zmm, zmm, xmm" xed="VPSLLD_ZMMu32_MASKmskw_ZMMu32_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_sll_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="__m128i" varname="count" etype="UI64"/>
+ <description>Shift packed 64-bit integers in "a" left by "count" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ IF count[63:0] &gt; 63
+ dst[i+63:i] := 0
+ ELSE
+ dst[i+63:i] := ZeroExtend64(a[i+63:i] &lt;&lt; count[63:0])
+ FI
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSLLQ" form="zmm {k}, zmm, xmm" xed="VPSLLQ_ZMMu64_MASKmskw_ZMMu64_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_slli_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="unsigned int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shift packed 64-bit integers in "a" left by "imm8" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ IF imm8[7:0] &gt; 63
+ dst[i+63:i] := 0
+ ELSE
+ dst[i+63:i] := ZeroExtend64(a[i+63:i] &lt;&lt; imm8[7:0])
+ FI
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSLLQ" form="zmm {k}, zmm, imm8" xed="VPSLLQ_ZMMu64_MASKmskw_ZMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_sll_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="__m128i" varname="count" etype="UI64"/>
+ <description>Shift packed 64-bit integers in "a" left by "count" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ IF count[63:0] &gt; 63
+ dst[i+63:i] := 0
+ ELSE
+ dst[i+63:i] := ZeroExtend64(a[i+63:i] &lt;&lt; count[63:0])
+ FI
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSLLQ" form="zmm {z}, zmm, xmm" xed="VPSLLQ_ZMMu64_MASKmskw_ZMMu64_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_slli_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="unsigned int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shift packed 64-bit integers in "a" left by "imm8" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ IF imm8[7:0] &gt; 63
+ dst[i+63:i] := 0
+ ELSE
+ dst[i+63:i] := ZeroExtend64(a[i+63:i] &lt;&lt; imm8[7:0])
+ FI
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSLLQ" form="zmm {z}, zmm, imm8" xed="VPSLLQ_ZMMu64_MASKmskw_ZMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_sll_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="__m128i" varname="count" etype="UI64"/>
+ <description>Shift packed 64-bit integers in "a" left by "count" while shifting in zeros, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF count[63:0] &gt; 63
+ dst[i+63:i] := 0
+ ELSE
+ dst[i+63:i] := ZeroExtend64(a[i+63:i] &lt;&lt; count[63:0])
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSLLQ" form="zmm, zmm, xmm" xed="VPSLLQ_ZMMu64_MASKmskw_ZMMu64_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_slli_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="unsigned int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shift packed 64-bit integers in "a" left by "imm8" while shifting in zeros, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF imm8[7:0] &gt; 63
+ dst[i+63:i] := 0
+ ELSE
+ dst[i+63:i] := ZeroExtend64(a[i+63:i] &lt;&lt; imm8[7:0])
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSLLQ" form="zmm, zmm, imm8" xed="VPSLLQ_ZMMu64_MASKmskw_ZMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_sllv_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="__m512i" varname="count" etype="UI32"/>
+ <description>Shift packed 32-bit integers in "a" left by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ IF count[i+31:i] &lt; 32
+ dst[i+31:i] := ZeroExtend32(a[i+31:i] &lt;&lt; count[i+31:i])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSLLVD" form="zmm {z}, zmm, zmm" xed="VPSLLVD_ZMMu32_MASKmskw_ZMMu32_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_sllv_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="__m512i" varname="count" etype="UI64"/>
+ <description>Shift packed 64-bit integers in "a" left by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ IF count[i+63:i] &lt; 64
+ dst[i+63:i] := ZeroExtend64(a[i+63:i] &lt;&lt; count[i+63:i])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSLLVQ" form="zmm {k}, zmm, zmm" xed="VPSLLVQ_ZMMu64_MASKmskw_ZMMu64_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_sllv_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="__m512i" varname="count" etype="UI64"/>
+ <description>Shift packed 64-bit integers in "a" left by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ IF count[i+63:i] &lt; 64
+ dst[i+63:i] := ZeroExtend64(a[i+63:i] &lt;&lt; count[i+63:i])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSLLVQ" form="zmm {z}, zmm, zmm" xed="VPSLLVQ_ZMMu64_MASKmskw_ZMMu64_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_sllv_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="__m512i" varname="count" etype="UI64"/>
+ <description>Shift packed 64-bit integers in "a" left by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF count[i+63:i] &lt; 64
+ dst[i+63:i] := ZeroExtend64(a[i+63:i] &lt;&lt; count[i+63:i])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSLLVQ" form="zmm, zmm, zmm" xed="VPSLLVQ_ZMMu64_MASKmskw_ZMMu64_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_sra_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="src" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="count" etype="UI32"/>
+ <description>Shift packed 32-bit integers in "a" right by "count" while shifting in sign bits, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ IF count[63:0] &gt; 31
+ dst[i+31:i] := (a[i+31] ? 0xFFFFFFFF : 0x0)
+ ELSE
+ dst[i+31:i] := SignExtend32(a[i+31:i] &gt;&gt; count[63:0])
+ FI
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSRAD" form="zmm {k}, zmm, xmm" xed="VPSRAD_ZMMu32_MASKmskw_ZMMu32_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_sra_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="count" etype="UI32"/>
+ <description>Shift packed 32-bit integers in "a" right by "count" while shifting in sign bits, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ IF count[63:0] &gt; 31
+ dst[i+31:i] := (a[i+31] ? 0xFFFFFFFF : 0x0)
+ ELSE
+ dst[i+31:i] := SignExtend32(a[i+31:i] &gt;&gt; count[63:0])
+ FI
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSRAD" form="zmm {z}, zmm, xmm" xed="VPSRAD_ZMMu32_MASKmskw_ZMMu32_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_srai_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI32"/>
+ <parameter type="unsigned int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shift packed 32-bit integers in "a" right by "imm8" while shifting in sign bits, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ IF imm8[7:0] &gt; 31
+ dst[i+31:i] := (a[i+31] ? 0xFFFFFFFF : 0x0)
+ ELSE
+ dst[i+31:i] := SignExtend32(a[i+31:i] &gt;&gt; imm8[7:0])
+ FI
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSRAD" form="zmm {z}, zmm, imm8" xed="VPSRAD_ZMMu32_MASKmskw_ZMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_sra_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="count" etype="UI32"/>
+ <description>Shift packed 32-bit integers in "a" right by "count" while shifting in sign bits, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF count[63:0] &gt; 31
+ dst[i+31:i] := (a[i+31] ? 0xFFFFFFFF : 0x0)
+ ELSE
+ dst[i+31:i] := SignExtend32(a[i+31:i] &gt;&gt; count[63:0])
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSRAD" form="zmm, zmm, xmm" xed="VPSRAD_ZMMu32_MASKmskw_ZMMu32_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_sra_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="__m128i" varname="count" etype="UI64"/>
+ <description>Shift packed 64-bit integers in "a" right by "count" while shifting in sign bits, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ IF count[63:0] &gt; 63
+ dst[i+63:i] := (a[i+63] ? 0xFFFFFFFFFFFFFFFF : 0x0)
+ ELSE
+ dst[i+63:i] := SignExtend64(a[i+63:i] &gt;&gt; count[63:0])
+ FI
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSRAQ" form="zmm {k}, zmm, xmm" xed="VPSRAQ_ZMMu64_MASKmskw_ZMMu64_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_srai_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI64"/>
+ <parameter type="unsigned int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shift packed 64-bit integers in "a" right by "imm8" while shifting in sign bits, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ IF imm8[7:0] &gt; 63
+ dst[i+63:i] := (a[i+63] ? 0xFFFFFFFFFFFFFFFF : 0x0)
+ ELSE
+ dst[i+63:i] := SignExtend64(a[i+63:i] &gt;&gt; imm8[7:0])
+ FI
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSRAQ" form="zmm {k}, zmm, imm8" xed="VPSRAQ_ZMMu64_MASKmskw_ZMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_sra_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="__m128i" varname="count" etype="UI64"/>
+ <description>Shift packed 64-bit integers in "a" right by "count" while shifting in sign bits, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ IF count[63:0] &gt; 63
+ dst[i+63:i] := (a[i+63] ? 0xFFFFFFFFFFFFFFFF : 0x0)
+ ELSE
+ dst[i+63:i] := SignExtend64(a[i+63:i] &gt;&gt; count[63:0])
+ FI
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSRAQ" form="zmm {z}, zmm, xmm" xed="VPSRAQ_ZMMu64_MASKmskw_ZMMu64_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_srai_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI64"/>
+ <parameter type="unsigned int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shift packed 64-bit integers in "a" right by "imm8" while shifting in sign bits, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ IF imm8[7:0] &gt; 63
+ dst[i+63:i] := (a[i+63] ? 0xFFFFFFFFFFFFFFFF : 0x0)
+ ELSE
+ dst[i+63:i] := SignExtend64(a[i+63:i] &gt;&gt; imm8[7:0])
+ FI
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSRAQ" form="zmm {z}, zmm, imm8" xed="VPSRAQ_ZMMu64_MASKmskw_ZMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_sra_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="__m128i" varname="count" etype="UI64"/>
+ <description>Shift packed 64-bit integers in "a" right by "count" while shifting in sign bits, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF count[63:0] &gt; 63
+ dst[i+63:i] := (a[i+63] ? 0xFFFFFFFFFFFFFFFF : 0x0)
+ ELSE
+ dst[i+63:i] := SignExtend64(a[i+63:i] &gt;&gt; count[63:0])
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSRAQ" form="zmm, zmm, xmm" xed="VPSRAQ_ZMMu64_MASKmskw_ZMMu64_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_srai_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="a" etype="SI64"/>
+ <parameter type="unsigned int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shift packed 64-bit integers in "a" right by "imm8" while shifting in sign bits, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF imm8[7:0] &gt; 63
+ dst[i+63:i] := (a[i+63] ? 0xFFFFFFFFFFFFFFFF : 0x0)
+ ELSE
+ dst[i+63:i] := SignExtend64(a[i+63:i] &gt;&gt; imm8[7:0])
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSRAQ" form="zmm, zmm, imm8" xed="VPSRAQ_ZMMu64_MASKmskw_ZMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_srav_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI32"/>
+ <parameter type="__m512i" varname="count" etype="UI32"/>
+ <description>Shift packed 32-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in sign bits, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ IF count[i+31:i] &lt; 32
+ dst[i+31:i] := SignExtend32(a[i+31:i] &gt;&gt; count[i+31:i])
+ ELSE
+ dst[i+31:i] := (a[i+31] ? 0xFFFFFFFF : 0)
+ FI
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSRAVD" form="zmm {z}, zmm, zmm" xed="VPSRAVD_ZMMu32_MASKmskw_ZMMu32_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_srav_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI64"/>
+ <parameter type="__m512i" varname="count" etype="UI64"/>
+ <description>Shift packed 64-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in sign bits, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ IF count[i+63:i] &lt; 64
+ dst[i+63:i] := SignExtend64(a[i+63:i] &gt;&gt; count[i+63:i])
+ ELSE
+ dst[i+63:i] := (a[i+63] ? 0xFFFFFFFFFFFFFFFF : 0)
+ FI
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSRAVQ" form="zmm {k}, zmm, zmm" xed="VPSRAVQ_ZMMu64_MASKmskw_ZMMu64_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_srav_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI64"/>
+ <parameter type="__m512i" varname="count" etype="UI64"/>
+ <description>Shift packed 64-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in sign bits, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ IF count[i+63:i] &lt; 64
+ dst[i+63:i] := SignExtend64(a[i+63:i] &gt;&gt; count[i+63:i])
+ ELSE
+ dst[i+63:i] := (a[i+63] ? 0xFFFFFFFFFFFFFFFF : 0)
+ FI
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSRAVQ" form="zmm {z}, zmm, zmm" xed="VPSRAVQ_ZMMu64_MASKmskw_ZMMu64_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_srav_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="a" etype="SI64"/>
+ <parameter type="__m512i" varname="count" etype="UI64"/>
+ <description>Shift packed 64-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in sign bits, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF count[i+63:i] &lt; 64
+ dst[i+63:i] := SignExtend64(a[i+63:i] &gt;&gt; count[i+63:i])
+ ELSE
+ dst[i+63:i] := (a[i+63] ? 0xFFFFFFFFFFFFFFFF : 0)
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSRAVQ" form="zmm, zmm, zmm" xed="VPSRAVQ_ZMMu64_MASKmskw_ZMMu64_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_srl_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="src" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="count" etype="UI32"/>
+ <description>Shift packed 32-bit integers in "a" right by "count" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ IF count[63:0] &gt; 31
+ dst[i+31:i] := 0
+ ELSE
+ dst[i+31:i] := ZeroExtend32(a[i+31:i] &gt;&gt; count[63:0])
+ FI
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSRLD" form="zmm {k}, zmm, xmm" xed="VPSRLD_ZMMu32_MASKmskw_ZMMu32_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_srl_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="count" etype="UI32"/>
+ <description>Shift packed 32-bit integers in "a" right by "count" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ IF count[63:0] &gt; 31
+ dst[i+31:i] := 0
+ ELSE
+ dst[i+31:i] := ZeroExtend32(a[i+31:i] &gt;&gt; count[63:0])
+ FI
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSRLD" form="zmm {z}, zmm, xmm" xed="VPSRLD_ZMMu32_MASKmskw_ZMMu32_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_srli_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="unsigned int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shift packed 32-bit integers in "a" right by "imm8" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ IF imm8[7:0] &gt; 31
+ dst[i+31:i] := 0
+ ELSE
+ dst[i+31:i] := ZeroExtend32(a[i+31:i] &gt;&gt; imm8[7:0])
+ FI
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSRLD" form="zmm {z}, zmm, imm8" xed="VPSRLD_ZMMu32_MASKmskw_ZMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_srl_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="count" etype="UI32"/>
+ <description>Shift packed 32-bit integers in "a" right by "count" while shifting in zeros, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF count[63:0] &gt; 31
+ dst[i+31:i] := 0
+ ELSE
+ dst[i+31:i] := ZeroExtend32(a[i+31:i] &gt;&gt; count[63:0])
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSRLD" form="zmm, zmm, xmm" xed="VPSRLD_ZMMu32_MASKmskw_ZMMu32_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_srl_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="__m128i" varname="count" etype="UI64"/>
+ <description>Shift packed 64-bit integers in "a" right by "count" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ IF count[63:0] &gt; 63
+ dst[i+63:i] := 0
+ ELSE
+ dst[i+63:i] := ZeroExtend64(a[i+63:i] &gt;&gt; count[63:0])
+ FI
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSRLQ" form="zmm {k}, zmm, xmm" xed="VPSRLQ_ZMMu64_MASKmskw_ZMMu64_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_srli_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="unsigned int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shift packed 64-bit integers in "a" right by "imm8" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ IF imm8[7:0] &gt; 63
+ dst[i+63:i] := 0
+ ELSE
+ dst[i+63:i] := ZeroExtend64(a[i+63:i] &gt;&gt; imm8[7:0])
+ FI
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSRLQ" form="zmm {k}, zmm, imm8" xed="VPSRLQ_ZMMu64_MASKmskw_ZMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_srl_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="__m128i" varname="count" etype="UI64"/>
+ <description>Shift packed 64-bit integers in "a" right by "count" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ IF count[63:0] &gt; 63
+ dst[i+63:i] := 0
+ ELSE
+ dst[i+63:i] := ZeroExtend64(a[i+63:i] &gt;&gt; count[63:0])
+ FI
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSRLQ" form="zmm {z}, zmm, xmm" xed="VPSRLQ_ZMMu64_MASKmskw_ZMMu64_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_srli_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="unsigned int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shift packed 64-bit integers in "a" right by "imm8" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ IF imm8[7:0] &gt; 63
+ dst[i+63:i] := 0
+ ELSE
+ dst[i+63:i] := ZeroExtend64(a[i+63:i] &gt;&gt; imm8[7:0])
+ FI
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSRLQ" form="zmm {z}, zmm, imm8" xed="VPSRLQ_ZMMu64_MASKmskw_ZMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_srl_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="__m128i" varname="count" etype="UI64"/>
+ <description>Shift packed 64-bit integers in "a" right by "count" while shifting in zeros, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF count[63:0] &gt; 63
+ dst[i+63:i] := 0
+ ELSE
+ dst[i+63:i] := ZeroExtend64(a[i+63:i] &gt;&gt; count[63:0])
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSRLQ" form="zmm, zmm, xmm" xed="VPSRLQ_ZMMu64_MASKmskw_ZMMu64_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_srli_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="unsigned int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shift packed 64-bit integers in "a" right by "imm8" while shifting in zeros, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF imm8[7:0] &gt; 63
+ dst[i+63:i] := 0
+ ELSE
+ dst[i+63:i] := ZeroExtend64(a[i+63:i] &gt;&gt; imm8[7:0])
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSRLQ" form="zmm, zmm, imm8" xed="VPSRLQ_ZMMu64_MASKmskw_ZMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_srlv_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="__m512i" varname="count" etype="UI32"/>
+ <description>Shift packed 32-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ IF count[i+31:i] &lt; 32
+ dst[i+31:i] := ZeroExtend32(a[i+31:i] &gt;&gt; count[i+31:i])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSRLVD" form="zmm {z}, zmm, zmm" xed="VPSRLVD_ZMMu32_MASKmskw_ZMMu32_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_srlv_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="__m512i" varname="count" etype="UI64"/>
+ <description>Shift packed 64-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ IF count[i+63:i] &lt; 64
+ dst[i+63:i] := ZeroExtend64(a[i+63:i] &gt;&gt; count[i+63:i])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSRLVQ" form="zmm {k}, zmm, zmm" xed="VPSRLVQ_ZMMu64_MASKmskw_ZMMu64_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_srlv_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="__m512i" varname="count" etype="UI64"/>
+ <description>Shift packed 64-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ IF count[i+63:i] &lt; 64
+ dst[i+63:i] := ZeroExtend64(a[i+63:i] &gt;&gt; count[i+63:i])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSRLVQ" form="zmm {z}, zmm, zmm" xed="VPSRLVQ_ZMMu64_MASKmskw_ZMMu64_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_srlv_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="__m512i" varname="count" etype="UI64"/>
+ <description>Shift packed 64-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF count[i+63:i] &lt; 64
+ dst[i+63:i] := ZeroExtend64(a[i+63:i] &gt;&gt; count[i+63:i])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSRLVQ" form="zmm, zmm, zmm" xed="VPSRLVQ_ZMMu64_MASKmskw_ZMMu64_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_sub_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="__m512i" varname="b" etype="UI32"/>
+ <description>Subtract packed 32-bit integers in "b" from packed 32-bit integers in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := a[i+31:i] - b[i+31:i]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSUBD" form="zmm {z}, zmm, zmm" xed="VPSUBD_ZMMu32_MASKmskw_ZMMu32_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_sub_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="__m512i" varname="b" etype="UI64"/>
+ <description>Subtract packed 64-bit integers in "b" from packed 64-bit integers in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := a[i+63:i] - b[i+63:i]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSUBQ" form="zmm {k}, zmm, zmm" xed="VPSUBQ_ZMMu64_MASKmskw_ZMMu64_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_sub_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="__m512i" varname="b" etype="UI64"/>
+ <description>Subtract packed 64-bit integers in "b" from packed 64-bit integers in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := a[i+63:i] - b[i+63:i]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSUBQ" form="zmm {z}, zmm, zmm" xed="VPSUBQ_ZMMu64_MASKmskw_ZMMu64_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_sub_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="__m512i" varname="b" etype="UI64"/>
+ <description>Subtract packed 64-bit integers in "b" from packed 64-bit integers in "a", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ dst[i+63:i] := a[i+63:i] - b[i+63:i]
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSUBQ" form="zmm, zmm, zmm" xed="VPSUBQ_ZMMu64_MASKmskw_ZMMu64_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_ternarylogic_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Logical</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="src" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="__m512i" varname="b" etype="UI32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Bitwise ternary logic that provides the capability to implement any three-operand binary function; the specific binary function is specified by value in "imm8". For each bit in each packed 32-bit integer, the corresponding bit from "src", "a", and "b" are used to form a 3 bit index into "imm8", and the value at that bit in "imm8" is written to the corresponding bit in "dst" using writemask "k" at 32-bit granularity (32-bit elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ FOR h := 0 to 31
+ index[2:0] := (src[i+h] &lt;&lt; 2) OR (a[i+h] &lt;&lt; 1) OR b[i+h]
+ dst[i+h] := imm8[index[2:0]]
+ ENDFOR
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPTERNLOGD" form="zmm {k}, zmm, zmm, imm8" xed="VPTERNLOGD_ZMMu32_MASKmskw_ZMMu32_ZMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_ternarylogic_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Logical</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="__m512i" varname="b" etype="UI32"/>
+ <parameter type="__m512i" varname="c" etype="UI32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Bitwise ternary logic that provides the capability to implement any three-operand binary function; the specific binary function is specified by value in "imm8". For each bit in each packed 32-bit integer, the corresponding bit from "a", "b", and "c" are used to form a 3 bit index into "imm8", and the value at that bit in "imm8" is written to the corresponding bit in "dst" using zeromask "k" at 32-bit granularity (32-bit elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ FOR h := 0 to 31
+ index[2:0] := (a[i+h] &lt;&lt; 2) OR (b[i+h] &lt;&lt; 1) OR c[i+h]
+ dst[i+h] := imm8[index[2:0]]
+ ENDFOR
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPTERNLOGD" form="zmm {z}, zmm, zmm, imm8" xed="VPTERNLOGD_ZMMu32_MASKmskw_ZMMu32_ZMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_ternarylogic_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Logical</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="__m512i" varname="b" etype="UI32"/>
+ <parameter type="__m512i" varname="c" etype="UI32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Bitwise ternary logic that provides the capability to implement any three-operand binary function; the specific binary function is specified by value in "imm8". For each bit in each packed 32-bit integer, the corresponding bit from "a", "b", and "c" are used to form a 3 bit index into "imm8", and the value at that bit in "imm8" is written to the corresponding bit in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ FOR h := 0 to 31
+ index[2:0] := (a[i+h] &lt;&lt; 2) OR (b[i+h] &lt;&lt; 1) OR c[i+h]
+ dst[i+h] := imm8[index[2:0]]
+ ENDFOR
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPTERNLOGD" form="zmm, zmm, zmm, imm8" xed="VPTERNLOGD_ZMMu32_MASKmskw_ZMMu32_ZMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_ternarylogic_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Logical</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="__m512i" varname="b" etype="UI64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Bitwise ternary logic that provides the capability to implement any three-operand binary function; the specific binary function is specified by value in "imm8". For each bit in each packed 64-bit integer, the corresponding bit from "src", "a", and "b" are used to form a 3 bit index into "imm8", and the value at that bit in "imm8" is written to the corresponding bit in "dst" using writemask "k" at 64-bit granularity (64-bit elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ FOR h := 0 to 63
+ index[2:0] := (src[i+h] &lt;&lt; 2) OR (a[i+h] &lt;&lt; 1) OR b[i+h]
+ dst[i+h] := imm8[index[2:0]]
+ ENDFOR
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPTERNLOGQ" form="zmm {k}, zmm, zmm, imm8" xed="VPTERNLOGQ_ZMMu64_MASKmskw_ZMMu64_ZMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_ternarylogic_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Logical</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="__m512i" varname="b" etype="UI64"/>
+ <parameter type="__m512i" varname="c" etype="UI64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Bitwise ternary logic that provides the capability to implement any three-operand binary function; the specific binary function is specified by value in "imm8". For each bit in each packed 64-bit integer, the corresponding bit from "a", "b", and "c" are used to form a 3 bit index into "imm8", and the value at that bit in "imm8" is written to the corresponding bit in "dst" using zeromask "k" at 64-bit granularity (64-bit elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ FOR h := 0 to 63
+ index[2:0] := (a[i+h] &lt;&lt; 2) OR (b[i+h] &lt;&lt; 1) OR c[i+h]
+ dst[i+h] := imm8[index[2:0]]
+ ENDFOR
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPTERNLOGQ" form="zmm {z}, zmm, zmm, imm8" xed="VPTERNLOGQ_ZMMu64_MASKmskw_ZMMu64_ZMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_ternarylogic_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Logical</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="__m512i" varname="b" etype="UI64"/>
+ <parameter type="__m512i" varname="c" etype="UI64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Bitwise ternary logic that provides the capability to implement any three-operand binary function; the specific binary function is specified by value in "imm8". For each bit in each packed 64-bit integer, the corresponding bit from "a", "b", and "c" are used to form a 3 bit index into "imm8", and the value at that bit in "imm8" is written to the corresponding bit in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ FOR h := 0 to 63
+ index[2:0] := (a[i+h] &lt;&lt; 2) OR (b[i+h] &lt;&lt; 1) OR c[i+h]
+ dst[i+h] := imm8[index[2:0]]
+ ENDFOR
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPTERNLOGQ" form="zmm, zmm, zmm, imm8" xed="VPTERNLOGQ_ZMMu64_MASKmskw_ZMMu64_ZMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_test_epi64_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Logical</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__mmask8" varname="k1" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="__m512i" varname="b" etype="UI64"/>
+ <description>Compute the bitwise AND of packed 64-bit integers in "a" and "b", producing intermediate 64-bit values, and set the corresponding bit in result mask "k" (subject to writemask "k") if the intermediate value is non-zero.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k1[j]
+ k[j] := ((a[i+63:i] AND b[i+63:i]) != 0) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:8] := 0
+ </operation>
+ <instruction name="VPTESTMQ" form="k {k}, zmm, zmm" xed="VPTESTMQ_MASKmskw_MASKmskw_ZMMu64_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_test_epi64_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Logical</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="__m512i" varname="b" etype="UI64"/>
+ <description>Compute the bitwise AND of packed 64-bit integers in "a" and "b", producing intermediate 64-bit values, and set the corresponding bit in result mask "k" if the intermediate value is non-zero.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ k[j] := ((a[i+63:i] AND b[i+63:i]) != 0) ? 1 : 0
+ENDFOR
+k[MAX:8] := 0
+ </operation>
+ <instruction name="VPTESTMQ" form="k, zmm, zmm" xed="VPTESTMQ_MASKmskw_MASKmskw_ZMMu64_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_testn_epi32_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Logical</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__mmask16" varname="k1" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="__m512i" varname="b" etype="UI32"/>
+ <description>Compute the bitwise NAND of packed 32-bit integers in "a" and "b", producing intermediate 32-bit values, and set the corresponding bit in result mask "k" (subject to writemask "k") if the intermediate value is zero.</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k1[j]
+ k[j] := ((a[i+31:i] AND b[i+31:i]) == 0) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:16] := 0
+ </operation>
+ <instruction name="VPTESTNMD" form="k {k}, zmm, zmm" xed="VPTESTNMD_MASKmskw_MASKmskw_ZMMu32_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_testn_epi32_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Logical</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="__m512i" varname="b" etype="UI32"/>
+ <description>Compute the bitwise NAND of packed 32-bit integers in "a" and "b", producing intermediate 32-bit values, and set the corresponding bit in result mask "k" if the intermediate value is zero.</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ k[j] := ((a[i+31:i] AND b[i+31:i]) == 0) ? 1 : 0
+ENDFOR
+k[MAX:16] := 0
+ </operation>
+ <instruction name="VPTESTNMD" form="k, zmm, zmm" xed="VPTESTNMD_MASKmskw_MASKmskw_ZMMu32_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_testn_epi64_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Logical</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__mmask8" varname="k1" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="__m512i" varname="b" etype="UI64"/>
+ <description>Compute the bitwise NAND of packed 64-bit integers in "a" and "b", producing intermediate 64-bit values, and set the corresponding bit in result mask "k" (subject to writemask "k") if the intermediate value is zero.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k1[j]
+ k[j] := ((a[i+63:i] AND b[i+63:i]) == 0) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:8] := 0
+ </operation>
+ <instruction name="VPTESTNMQ" form="k {k}, zmm, zmm" xed="VPTESTNMQ_MASKmskw_MASKmskw_ZMMu64_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_testn_epi64_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Logical</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="__m512i" varname="b" etype="UI64"/>
+ <description>Compute the bitwise NAND of packed 64-bit integers in "a" and "b", producing intermediate 64-bit values, and set the corresponding bit in result mask "k" if the intermediate value is zero.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ k[j] := ((a[i+63:i] AND b[i+63:i]) == 0) ? 1 : 0
+ENDFOR
+k[MAX:8] := 0
+ </operation>
+ <instruction name="VPTESTNMQ" form="k, zmm, zmm" xed="VPTESTNMQ_MASKmskw_MASKmskw_ZMMu64_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_unpackhi_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="src" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="__m512i" varname="b" etype="UI32"/>
+ <description>Unpack and interleave 32-bit integers from the high half of each 128-bit lane in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE INTERLEAVE_HIGH_DWORDS(src1[127:0], src2[127:0]) {
+ dst[31:0] := src1[95:64]
+ dst[63:32] := src2[95:64]
+ dst[95:64] := src1[127:96]
+ dst[127:96] := src2[127:96]
+ RETURN dst[127:0]
+}
+tmp_dst[127:0] := INTERLEAVE_HIGH_DWORDS(a[127:0], b[127:0])
+tmp_dst[255:128] := INTERLEAVE_HIGH_DWORDS(a[255:128], b[255:128])
+tmp_dst[383:256] := INTERLEAVE_HIGH_DWORDS(a[383:256], b[383:256])
+tmp_dst[511:384] := INTERLEAVE_HIGH_DWORDS(a[511:384], b[511:384])
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := tmp_dst[i+31:i]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPUNPCKHDQ" form="zmm {k}, zmm, zmm" xed="VPUNPCKHDQ_ZMMu32_MASKmskw_ZMMu32_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_unpackhi_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="__m512i" varname="b" etype="UI32"/>
+ <description>Unpack and interleave 32-bit integers from the high half of each 128-bit lane in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE INTERLEAVE_HIGH_DWORDS(src1[127:0], src2[127:0]) {
+ dst[31:0] := src1[95:64]
+ dst[63:32] := src2[95:64]
+ dst[95:64] := src1[127:96]
+ dst[127:96] := src2[127:96]
+ RETURN dst[127:0]
+}
+tmp_dst[127:0] := INTERLEAVE_HIGH_DWORDS(a[127:0], b[127:0])
+tmp_dst[255:128] := INTERLEAVE_HIGH_DWORDS(a[255:128], b[255:128])
+tmp_dst[383:256] := INTERLEAVE_HIGH_DWORDS(a[383:256], b[383:256])
+tmp_dst[511:384] := INTERLEAVE_HIGH_DWORDS(a[511:384], b[511:384])
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := tmp_dst[i+31:i]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPUNPCKHDQ" form="zmm {z}, zmm, zmm" xed="VPUNPCKHDQ_ZMMu32_MASKmskw_ZMMu32_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_unpackhi_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="__m512i" varname="b" etype="UI32"/>
+ <description>Unpack and interleave 32-bit integers from the high half of each 128-bit lane in "a" and "b", and store the results in "dst".</description>
+ <operation>
+DEFINE INTERLEAVE_HIGH_DWORDS(src1[127:0], src2[127:0]) {
+ dst[31:0] := src1[95:64]
+ dst[63:32] := src2[95:64]
+ dst[95:64] := src1[127:96]
+ dst[127:96] := src2[127:96]
+ RETURN dst[127:0]
+}
+dst[127:0] := INTERLEAVE_HIGH_DWORDS(a[127:0], b[127:0])
+dst[255:128] := INTERLEAVE_HIGH_DWORDS(a[255:128], b[255:128])
+dst[383:256] := INTERLEAVE_HIGH_DWORDS(a[383:256], b[383:256])
+dst[511:384] := INTERLEAVE_HIGH_DWORDS(a[511:384], b[511:384])
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPUNPCKHDQ" form="zmm, zmm, zmm" xed="VPUNPCKHDQ_ZMMu32_MASKmskw_ZMMu32_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_unpackhi_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="__m512i" varname="b" etype="UI64"/>
+ <description>Unpack and interleave 64-bit integers from the high half of each 128-bit lane in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE INTERLEAVE_HIGH_QWORDS(src1[127:0], src2[127:0]) {
+ dst[63:0] := src1[127:64]
+ dst[127:64] := src2[127:64]
+ RETURN dst[127:0]
+}
+tmp_dst[127:0] := INTERLEAVE_HIGH_QWORDS(a[127:0], b[127:0])
+tmp_dst[255:128] := INTERLEAVE_HIGH_QWORDS(a[255:128], b[255:128])
+tmp_dst[383:256] := INTERLEAVE_HIGH_QWORDS(a[383:256], b[383:256])
+tmp_dst[511:384] := INTERLEAVE_HIGH_QWORDS(a[511:384], b[511:384])
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := tmp_dst[i+63:i]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPUNPCKHQDQ" form="zmm {k}, zmm, zmm" xed="VPUNPCKHQDQ_ZMMu64_MASKmskw_ZMMu64_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_unpackhi_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="__m512i" varname="b" etype="UI64"/>
+ <description>Unpack and interleave 64-bit integers from the high half of each 128-bit lane in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE INTERLEAVE_HIGH_QWORDS(src1[127:0], src2[127:0]) {
+ dst[63:0] := src1[127:64]
+ dst[127:64] := src2[127:64]
+ RETURN dst[127:0]
+}
+tmp_dst[127:0] := INTERLEAVE_HIGH_QWORDS(a[127:0], b[127:0])
+tmp_dst[255:128] := INTERLEAVE_HIGH_QWORDS(a[255:128], b[255:128])
+tmp_dst[383:256] := INTERLEAVE_HIGH_QWORDS(a[383:256], b[383:256])
+tmp_dst[511:384] := INTERLEAVE_HIGH_QWORDS(a[511:384], b[511:384])
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := tmp_dst[i+63:i]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPUNPCKHQDQ" form="zmm {z}, zmm, zmm" xed="VPUNPCKHQDQ_ZMMu64_MASKmskw_ZMMu64_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_unpackhi_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="__m512i" varname="b" etype="UI64"/>
+ <description>Unpack and interleave 64-bit integers from the high half of each 128-bit lane in "a" and "b", and store the results in "dst".</description>
+ <operation>
+DEFINE INTERLEAVE_HIGH_QWORDS(src1[127:0], src2[127:0]) {
+ dst[63:0] := src1[127:64]
+ dst[127:64] := src2[127:64]
+ RETURN dst[127:0]
+}
+dst[127:0] := INTERLEAVE_HIGH_QWORDS(a[127:0], b[127:0])
+dst[255:128] := INTERLEAVE_HIGH_QWORDS(a[255:128], b[255:128])
+dst[383:256] := INTERLEAVE_HIGH_QWORDS(a[383:256], b[383:256])
+dst[511:384] := INTERLEAVE_HIGH_QWORDS(a[511:384], b[511:384])
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPUNPCKHQDQ" form="zmm, zmm, zmm" xed="VPUNPCKHQDQ_ZMMu64_MASKmskw_ZMMu64_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_unpacklo_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="src" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="__m512i" varname="b" etype="UI32"/>
+ <description>Unpack and interleave 32-bit integers from the low half of each 128-bit lane in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE INTERLEAVE_DWORDS(src1[127:0], src2[127:0]) {
+ dst[31:0] := src1[31:0]
+ dst[63:32] := src2[31:0]
+ dst[95:64] := src1[63:32]
+ dst[127:96] := src2[63:32]
+ RETURN dst[127:0]
+}
+tmp_dst[127:0] := INTERLEAVE_DWORDS(a[127:0], b[127:0])
+tmp_dst[255:128] := INTERLEAVE_DWORDS(a[255:128], b[255:128])
+tmp_dst[383:256] := INTERLEAVE_DWORDS(a[383:256], b[383:256])
+tmp_dst[511:384] := INTERLEAVE_DWORDS(a[511:384], b[511:384])
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := tmp_dst[i+31:i]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPUNPCKLDQ" form="zmm {k}, zmm, zmm" xed="VPUNPCKLDQ_ZMMu32_MASKmskw_ZMMu32_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_unpacklo_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="__m512i" varname="b" etype="UI32"/>
+ <description>Unpack and interleave 32-bit integers from the low half of each 128-bit lane in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE INTERLEAVE_DWORDS(src1[127:0], src2[127:0]) {
+ dst[31:0] := src1[31:0]
+ dst[63:32] := src2[31:0]
+ dst[95:64] := src1[63:32]
+ dst[127:96] := src2[63:32]
+ RETURN dst[127:0]
+}
+tmp_dst[127:0] := INTERLEAVE_DWORDS(a[127:0], b[127:0])
+tmp_dst[255:128] := INTERLEAVE_DWORDS(a[255:128], b[255:128])
+tmp_dst[383:256] := INTERLEAVE_DWORDS(a[383:256], b[383:256])
+tmp_dst[511:384] := INTERLEAVE_DWORDS(a[511:384], b[511:384])
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := tmp_dst[i+31:i]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPUNPCKLDQ" form="zmm {z}, zmm, zmm" xed="VPUNPCKLDQ_ZMMu32_MASKmskw_ZMMu32_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_unpacklo_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="__m512i" varname="b" etype="UI32"/>
+ <description>Unpack and interleave 32-bit integers from the low half of each 128-bit lane in "a" and "b", and store the results in "dst".</description>
+ <operation>
+DEFINE INTERLEAVE_DWORDS(src1[127:0], src2[127:0]) {
+ dst[31:0] := src1[31:0]
+ dst[63:32] := src2[31:0]
+ dst[95:64] := src1[63:32]
+ dst[127:96] := src2[63:32]
+ RETURN dst[127:0]
+}
+dst[127:0] := INTERLEAVE_DWORDS(a[127:0], b[127:0])
+dst[255:128] := INTERLEAVE_DWORDS(a[255:128], b[255:128])
+dst[383:256] := INTERLEAVE_DWORDS(a[383:256], b[383:256])
+dst[511:384] := INTERLEAVE_DWORDS(a[511:384], b[511:384])
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPUNPCKLDQ" form="zmm, zmm, zmm" xed="VPUNPCKLDQ_ZMMu32_MASKmskw_ZMMu32_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_unpacklo_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="__m512i" varname="b" etype="UI64"/>
+ <description>Unpack and interleave 64-bit integers from the low half of each 128-bit lane in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE INTERLEAVE_QWORDS(src1[127:0], src2[127:0]) {
+ dst[63:0] := src1[63:0]
+ dst[127:64] := src2[63:0]
+ RETURN dst[127:0]
+}
+tmp_dst[127:0] := INTERLEAVE_QWORDS(a[127:0], b[127:0])
+tmp_dst[255:128] := INTERLEAVE_QWORDS(a[255:128], b[255:128])
+tmp_dst[383:256] := INTERLEAVE_QWORDS(a[383:256], b[383:256])
+tmp_dst[511:384] := INTERLEAVE_QWORDS(a[511:384], b[511:384])
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := tmp_dst[i+63:i]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPUNPCKLQDQ" form="zmm {k}, zmm, zmm" xed="VPUNPCKLQDQ_ZMMu64_MASKmskw_ZMMu64_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_unpacklo_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="__m512i" varname="b" etype="UI64"/>
+ <description>Unpack and interleave 64-bit integers from the low half of each 128-bit lane in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE INTERLEAVE_QWORDS(src1[127:0], src2[127:0]) {
+ dst[63:0] := src1[63:0]
+ dst[127:64] := src2[63:0]
+ RETURN dst[127:0]
+}
+tmp_dst[127:0] := INTERLEAVE_QWORDS(a[127:0], b[127:0])
+tmp_dst[255:128] := INTERLEAVE_QWORDS(a[255:128], b[255:128])
+tmp_dst[383:256] := INTERLEAVE_QWORDS(a[383:256], b[383:256])
+tmp_dst[511:384] := INTERLEAVE_QWORDS(a[511:384], b[511:384])
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := tmp_dst[i+63:i]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPUNPCKLQDQ" form="zmm {z}, zmm, zmm" xed="VPUNPCKLQDQ_ZMMu64_MASKmskw_ZMMu64_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_unpacklo_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="__m512i" varname="b" etype="UI64"/>
+ <description>Unpack and interleave 64-bit integers from the low half of each 128-bit lane in "a" and "b", and store the results in "dst".</description>
+ <operation>
+DEFINE INTERLEAVE_QWORDS(src1[127:0], src2[127:0]) {
+ dst[63:0] := src1[63:0]
+ dst[127:64] := src2[63:0]
+ RETURN dst[127:0]
+}
+dst[127:0] := INTERLEAVE_QWORDS(a[127:0], b[127:0])
+dst[255:128] := INTERLEAVE_QWORDS(a[255:128], b[255:128])
+dst[383:256] := INTERLEAVE_QWORDS(a[383:256], b[383:256])
+dst[511:384] := INTERLEAVE_QWORDS(a[511:384], b[511:384])
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPUNPCKLQDQ" form="zmm, zmm, zmm" xed="VPUNPCKLQDQ_ZMMu64_MASKmskw_ZMMu64_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_xor_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Logical</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="__m512i" varname="b" etype="UI32"/>
+ <description>Compute the bitwise XOR of packed 32-bit integers in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := a[i+31:i] XOR b[i+31:i]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPXORD" form="zmm {z}, zmm, zmm" xed="VPXORD_ZMMu32_MASKmskw_ZMMu32_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_xor_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Logical</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="__m512i" varname="b" etype="UI64"/>
+ <description>Compute the bitwise XOR of packed 64-bit integers in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := a[i+63:i] XOR b[i+63:i]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPXORQ" form="zmm {z}, zmm, zmm" xed="VPXORQ_ZMMu64_MASKmskw_ZMMu64_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_rcp14_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Compute the approximate reciprocal of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-14.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := (1.0 / a[i+63:i])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VRCP14PD" form="zmm {k}, zmm" xed="VRCP14PD_ZMMf64_MASKmskw_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_rcp14_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Compute the approximate reciprocal of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-14.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := (1.0 / a[i+63:i])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VRCP14PD" form="zmm {z}, zmm" xed="VRCP14PD_ZMMf64_MASKmskw_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_rcp14_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Compute the approximate reciprocal of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst". The maximum relative error for this approximation is less than 2^-14.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ dst[i+63:i] := (1.0 / a[i+63:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VRCP14PD" form="zmm, zmm" xed="VRCP14PD_ZMMf64_MASKmskw_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_rcp14_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Compute the approximate reciprocal of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-14.</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := (1.0 / a[i+31:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VRCP14PS" form="zmm {k}, zmm" xed="VRCP14PS_ZMMf32_MASKmskw_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_rcp14_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Compute the approximate reciprocal of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-14.</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := (1.0 / a[i+31:i])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VRCP14PS" form="zmm {z}, zmm" xed="VRCP14PS_ZMMf32_MASKmskw_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_rcp14_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Compute the approximate reciprocal of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". The maximum relative error for this approximation is less than 2^-14.</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := (1.0 / a[i+31:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VRCP14PS" form="zmm, zmm" xed="VRCP14PS_ZMMf32_MASKmskw_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_rcp14_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Compute the approximate reciprocal of the lower double-precision (64-bit) floating-point element in "b", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". The maximum relative error for this approximation is less than 2^-14.</description>
+ <operation>
+IF k[0]
+ dst[63:0] := (1.0 / b[63:0])
+ELSE
+ dst[63:0] := src[63:0]
+FI
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VRCP14SD" form="xmm {k}, xmm, xmm" xed="VRCP14SD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_rcp14_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Compute the approximate reciprocal of the lower double-precision (64-bit) floating-point element in "b", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". The maximum relative error for this approximation is less than 2^-14.</description>
+ <operation>
+IF k[0]
+ dst[63:0] := (1.0 / b[63:0])
+ELSE
+ dst[63:0] := 0
+FI
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VRCP14SD" form="xmm {z}, xmm, xmm" xed="VRCP14SD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_rcp14_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Compute the approximate reciprocal of the lower double-precision (64-bit) floating-point element in "b", store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst". The maximum relative error for this approximation is less than 2^-14.</description>
+ <operation>
+dst[63:0] := (1.0 / b[63:0])
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VRCP14SD" form="xmm, xmm, xmm" xed="VRCP14SD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_rcp14_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Compute the approximate reciprocal of the lower single-precision (32-bit) floating-point element in "b", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". The maximum relative error for this approximation is less than 2^-14.</description>
+ <operation>
+IF k[0]
+ dst[31:0] := (1.0 / b[31:0])
+ELSE
+ dst[31:0] := src[31:0]
+FI
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VRCP14SS" form="xmm {k}, xmm, xmm" xed="VRCP14SS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_rcp14_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Compute the approximate reciprocal of the lower single-precision (32-bit) floating-point element in "b", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". The maximum relative error for this approximation is less than 2^-14.</description>
+ <operation>
+IF k[0]
+ dst[31:0] := (1.0 / b[31:0])
+ELSE
+ dst[31:0] := 0
+FI
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VRCP14SS" form="xmm {z}, xmm, xmm" xed="VRCP14SS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_rcp14_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Compute the approximate reciprocal of the lower single-precision (32-bit) floating-point element in "b", store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". The maximum relative error for this approximation is less than 2^-14.</description>
+ <operation>
+dst[31:0] := (1.0 / b[31:0])
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VRCP14SS" form="xmm, xmm, xmm" xed="VRCP14SS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_roundscale_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immtype="_MM_REDUCE"/>
+ <description>Round packed double-precision (64-bit) floating-point elements in "a" to the number of fraction bits specified by "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). [round_imm_note]</description>
+ <operation>
+DEFINE RoundScaleFP64(src1[63:0], imm8[7:0]) {
+ m[63:0] := FP64(imm8[7:4]) // number of fraction bits after the binary point to be preserved
+ tmp[63:0] := POW(2.0, -m) * ROUND(POW(2.0, m) * src1[63:0], imm8[3:0])
+ IF IsInf(tmp[63:0])
+ tmp[63:0] := src1[63:0]
+ FI
+ RETURN tmp[63:0]
+}
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := RoundScaleFP64(a[i+63:i], imm8[7:0])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VRNDSCALEPD" form="zmm {k}, zmm, imm8" xed="VRNDSCALEPD_ZMMf64_MASKmskw_ZMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_roundscale_round_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immtype="_MM_REDUCE"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Round packed double-precision (64-bit) floating-point elements in "a" to the number of fraction bits specified by "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). [round_imm_note][sae_note]</description>
+ <operation>
+DEFINE RoundScaleFP64(src1[63:0], imm8[7:0]) {
+ m[63:0] := FP64(imm8[7:4]) // number of fraction bits after the binary point to be preserved
+ tmp[63:0] := POW(2.0, -m) * ROUND(POW(2.0, m) * src1[63:0], imm8[3:0])
+ IF IsInf(tmp[63:0])
+ tmp[63:0] := src1[63:0]
+ FI
+ RETURN tmp[63:0]
+}
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := RoundScaleFP64(a[i+63:i], imm8[7:0])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VRNDSCALEPD" form="zmm {k}, zmm, imm8 {sae}" xed="VRNDSCALEPD_ZMMf64_MASKmskw_ZMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_roundscale_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immtype="_MM_REDUCE"/>
+ <description>Round packed double-precision (64-bit) floating-point elements in "a" to the number of fraction bits specified by "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). [round_imm_note]</description>
+ <operation>
+DEFINE RoundScaleFP64(src1[63:0], imm8[7:0]) {
+ m[63:0] := FP64(imm8[7:4]) // number of fraction bits after the binary point to be preserved
+ tmp[63:0] := POW(2.0, -m) * ROUND(POW(2.0, m) * src1[63:0], imm8[3:0])
+ IF IsInf(tmp[63:0])
+ tmp[63:0] := src1[63:0]
+ FI
+ RETURN tmp[63:0]
+}
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := RoundScaleFP64(a[i+63:i], imm8[7:0])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VRNDSCALEPD" form="zmm {z}, zmm, imm8" xed="VRNDSCALEPD_ZMMf64_MASKmskw_ZMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_roundscale_round_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immtype="_MM_REDUCE"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Round packed double-precision (64-bit) floating-point elements in "a" to the number of fraction bits specified by "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). [round_imm_note][sae_note]</description>
+ <operation>
+DEFINE RoundScaleFP64(src1[63:0], imm8[7:0]) {
+ m[63:0] := FP64(imm8[7:4]) // number of fraction bits after the binary point to be preserved
+ tmp[63:0] := POW(2.0, -m) * ROUND(POW(2.0, m) * src1[63:0], imm8[3:0])
+ IF IsInf(tmp[63:0])
+ tmp[63:0] := src1[63:0]
+ FI
+ RETURN tmp[63:0]
+}
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := RoundScaleFP64(a[i+63:i], imm8[7:0])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VRNDSCALEPD" form="zmm {z}, zmm, imm8 {sae}" xed="VRNDSCALEPD_ZMMf64_MASKmskw_ZMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_roundscale_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immtype="_MM_REDUCE"/>
+ <description>Round packed double-precision (64-bit) floating-point elements in "a" to the number of fraction bits specified by "imm8", and store the results in "dst". [round_imm_note]</description>
+ <operation>
+DEFINE RoundScaleFP64(src1[63:0], imm8[7:0]) {
+ m[63:0] := FP64(imm8[7:4]) // number of fraction bits after the binary point to be preserved
+ tmp[63:0] := POW(2.0, -m) * ROUND(POW(2.0, m) * src1[63:0], imm8[3:0])
+ IF IsInf(tmp[63:0])
+ tmp[63:0] := src1[63:0]
+ FI
+ RETURN tmp[63:0]
+}
+FOR j := 0 to 7
+ i := j*64
+ dst[i+63:i] := RoundScaleFP64(a[i+63:i], imm8[7:0])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VRNDSCALEPD" form="zmm, zmm, imm8" xed="VRNDSCALEPD_ZMMf64_MASKmskw_ZMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_roundscale_round_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immtype="_MM_REDUCE"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Round packed double-precision (64-bit) floating-point elements in "a" to the number of fraction bits specified by "imm8", and store the results in "dst". [round_imm_note][sae_note]</description>
+ <operation>
+DEFINE RoundScaleFP64(src1[63:0], imm8[7:0]) {
+ m[63:0] := FP64(imm8[7:4]) // number of fraction bits after the binary point to be preserved
+ tmp[63:0] := POW(2.0, -m) * ROUND(POW(2.0, m) * src1[63:0], imm8[3:0])
+ IF IsInf(tmp[63:0])
+ tmp[63:0] := src1[63:0]
+ FI
+ RETURN tmp[63:0]
+}
+FOR j := 0 to 7
+ i := j*64
+ dst[i+63:i] := RoundScaleFP64(a[i+63:i], imm8[7:0])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VRNDSCALEPD" form="zmm, zmm, imm8 {sae}" xed="VRNDSCALEPD_ZMMf64_MASKmskw_ZMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_roundscale_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immtype="_MM_REDUCE"/>
+ <description>Round packed single-precision (32-bit) floating-point elements in "a" to the number of fraction bits specified by "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). [round_imm_note]</description>
+ <operation>
+DEFINE RoundScaleFP32(src1[31:0], imm8[7:0]) {
+ m[31:0] := FP32(imm8[7:4]) // number of fraction bits after the binary point to be preserved
+ tmp[31:0] := POW(FP32(2.0), -m) * ROUND(POW(FP32(2.0), m) * src1[31:0], imm8[3:0])
+ IF IsInf(tmp[31:0])
+ tmp[31:0] := src1[31:0]
+ FI
+ RETURN tmp[31:0]
+}
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := RoundScaleFP32(a[i+31:i], imm8[7:0])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VRNDSCALEPS" form="zmm {k}, zmm, imm8" xed="VRNDSCALEPS_ZMMf32_MASKmskw_ZMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_roundscale_round_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immtype="_MM_REDUCE"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Round packed single-precision (32-bit) floating-point elements in "a" to the number of fraction bits specified by "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). [round_imm_note][sae_note]</description>
+ <operation>
+DEFINE RoundScaleFP32(src1[31:0], imm8[7:0]) {
+ m[31:0] := FP32(imm8[7:4]) // number of fraction bits after the binary point to be preserved
+ tmp[31:0] := POW(FP32(2.0), -m) * ROUND(POW(FP32(2.0), m) * src1[31:0], imm8[3:0])
+ IF IsInf(tmp[31:0])
+ tmp[31:0] := src1[31:0]
+ FI
+ RETURN tmp[31:0]
+}
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := RoundScaleFP32(a[i+31:i], imm8[7:0])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VRNDSCALEPS" form="zmm {k}, zmm, imm8 {sae}" xed="VRNDSCALEPS_ZMMf32_MASKmskw_ZMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_roundscale_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immtype="_MM_REDUCE"/>
+ <description>Round packed single-precision (32-bit) floating-point elements in "a" to the number of fraction bits specified by "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). [round_imm_note]</description>
+ <operation>
+DEFINE RoundScaleFP32(src1[31:0], imm8[7:0]) {
+ m[31:0] := FP32(imm8[7:4]) // number of fraction bits after the binary point to be preserved
+ tmp[31:0] := POW(FP32(2.0), -m) * ROUND(POW(FP32(2.0), m) * src1[31:0], imm8[3:0])
+ IF IsInf(tmp[31:0])
+ tmp[31:0] := src1[31:0]
+ FI
+ RETURN tmp[31:0]
+}
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := RoundScaleFP32(a[i+31:i], imm8[7:0])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VRNDSCALEPS" form="zmm {z}, zmm, imm8" xed="VRNDSCALEPS_ZMMf32_MASKmskw_ZMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_roundscale_round_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immtype="_MM_REDUCE"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Round packed single-precision (32-bit) floating-point elements in "a" to the number of fraction bits specified by "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). [round_imm_note][sae_note]</description>
+ <operation>
+DEFINE RoundScaleFP32(src1[31:0], imm8[7:0]) {
+ m[31:0] := FP32(imm8[7:4]) // number of fraction bits after the binary point to be preserved
+ tmp[31:0] := POW(FP32(2.0), -m) * ROUND(POW(FP32(2.0), m) * src1[31:0], imm8[3:0])
+ IF IsInf(tmp[31:0])
+ tmp[31:0] := src1[31:0]
+ FI
+ RETURN tmp[31:0]
+}
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := RoundScaleFP32(a[i+31:i], imm8[7:0])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VRNDSCALEPS" form="zmm {z}, zmm, imm8 {sae}" xed="VRNDSCALEPS_ZMMf32_MASKmskw_ZMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_roundscale_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immtype="_MM_REDUCE"/>
+ <description>Round packed single-precision (32-bit) floating-point elements in "a" to the number of fraction bits specified by "imm8", and store the results in "dst". [round_imm_note]</description>
+ <operation>
+DEFINE RoundScaleFP32(src1[31:0], imm8[7:0]) {
+ m[31:0] := FP32(imm8[7:4]) // number of fraction bits after the binary point to be preserved
+ tmp[31:0] := POW(FP32(2.0), -m) * ROUND(POW(FP32(2.0), m) * src1[31:0], imm8[3:0])
+ IF IsInf(tmp[31:0])
+ tmp[31:0] := src1[31:0]
+ FI
+ RETURN tmp[31:0]
+}
+FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := RoundScaleFP32(a[i+31:i], imm8[7:0])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VRNDSCALEPS" form="zmm, zmm, imm8" xed="VRNDSCALEPS_ZMMf32_MASKmskw_ZMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_roundscale_round_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immtype="_MM_REDUCE"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Round packed single-precision (32-bit) floating-point elements in "a" to the number of fraction bits specified by "imm8", and store the results in "dst". [round_imm_note][sae_note]</description>
+ <operation>
+DEFINE RoundScaleFP32(src1[31:0], imm8[7:0]) {
+ m[31:0] := FP32(imm8[7:4]) // number of fraction bits after the binary point to be preserved
+ tmp[31:0] := POW(FP32(2.0), -m) * ROUND(POW(FP32(2.0), m) * src1[31:0], imm8[3:0])
+ IF IsInf(tmp[31:0])
+ tmp[31:0] := src1[31:0]
+ FI
+ RETURN tmp[31:0]
+}
+FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := RoundScaleFP32(a[i+31:i], imm8[7:0])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VRNDSCALEPS" form="zmm, zmm, imm8 {sae}" xed="VRNDSCALEPS_ZMMf32_MASKmskw_ZMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_roundscale_round_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immtype="_MM_REDUCE"/>
+ <parameter type="const int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Round the lower double-precision (64-bit) floating-point element in "b" to the number of fraction bits specified by "imm8", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". [round_imm_note][sae_note]</description>
+ <operation>
+DEFINE RoundScaleFP64(src1[63:0], imm8[7:0]) {
+ m[63:0] := FP64(imm8[7:4]) // number of fraction bits after the binary point to be preserved
+ tmp[63:0] := POW(2.0, -m) * ROUND(POW(2.0, m) * src1[63:0], imm8[3:0])
+ IF IsInf(tmp[63:0])
+ tmp[63:0] := src1[63:0]
+ FI
+ RETURN tmp[63:0]
+}
+IF k[0]
+ dst[63:0] := RoundScaleFP64(b[63:0], imm8[7:0])
+ELSE
+ dst[63:0] := src[63:0]
+FI
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VRNDSCALESD" form="xmm {k}, xmm, xmm, imm8 {sae}" xed="VRNDSCALESD_XMMf64_MASKmskw_XMMf64_XMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_roundscale_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immtype="_MM_REDUCE"/>
+ <description>Round the lower double-precision (64-bit) floating-point element in "b" to the number of fraction bits specified by "imm8", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". [round_imm_note]</description>
+ <operation>
+DEFINE RoundScaleFP64(src1[63:0], imm8[7:0]) {
+ m[63:0] := FP64(imm8[7:4]) // number of fraction bits after the binary point to be preserved
+ tmp[63:0] := POW(2.0, -m) * ROUND(POW(2.0, m) * src1[63:0], imm8[3:0])
+ IF IsInf(tmp[63:0])
+ tmp[63:0] := src1[63:0]
+ FI
+ RETURN tmp[63:0]
+}
+IF k[0]
+ dst[63:0] := RoundScaleFP64(b[63:0], imm8[7:0])
+ELSE
+ dst[63:0] := src[63:0]
+FI
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VRNDSCALESD" form="xmm {k}, xmm, xmm, imm8" xed="VRNDSCALESD_XMMf64_MASKmskw_XMMf64_XMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_roundscale_round_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immtype="_MM_REDUCE"/>
+ <parameter type="const int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Round the lower double-precision (64-bit) floating-point element in "b" to the number of fraction bits specified by "imm8", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". [round_imm_note][sae_note]</description>
+ <operation>
+DEFINE RoundScaleFP64(src1[63:0], imm8[7:0]) {
+ m[63:0] := FP64(imm8[7:4]) // number of fraction bits after the binary point to be preserved
+ tmp[63:0] := POW(2.0, -m) * ROUND(POW(2.0, m) * src1[63:0], imm8[3:0])
+ IF IsInf(tmp[63:0])
+ tmp[63:0] := src1[63:0]
+ FI
+ RETURN tmp[63:0]
+}
+IF k[0]
+ dst[63:0] := RoundScaleFP64(b[63:0], imm8[7:0])
+ELSE
+ dst[63:0] := 0
+FI
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VRNDSCALESD" form="xmm {z}, xmm, xmm, imm8 {sae}" xed="VRNDSCALESD_XMMf64_MASKmskw_XMMf64_XMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_roundscale_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immtype="_MM_REDUCE"/>
+ <description>Round the lower double-precision (64-bit) floating-point element in "b" to the number of fraction bits specified by "imm8", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". [round_imm_note]</description>
+ <operation>
+DEFINE RoundScaleFP64(src1[63:0], imm8[7:0]) {
+ m[63:0] := FP64(imm8[7:4]) // number of fraction bits after the binary point to be preserved
+ tmp[63:0] := POW(2.0, -m) * ROUND(POW(2.0, m) * src1[63:0], imm8[3:0])
+ IF IsInf(tmp[63:0])
+ tmp[63:0] := src1[63:0]
+ FI
+ RETURN tmp[63:0]
+}
+IF k[0]
+ dst[63:0] := RoundScaleFP64(b[63:0], imm8[7:0])
+ELSE
+ dst[63:0] := 0
+FI
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VRNDSCALESD" form="xmm {z}, xmm, xmm, imm8" xed="VRNDSCALESD_XMMf64_MASKmskw_XMMf64_XMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_roundscale_round_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immtype="_MM_REDUCE"/>
+ <parameter type="const int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Round the lower double-precision (64-bit) floating-point element in "b" to the number of fraction bits specified by "imm8", store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst". [round_imm_note][sae_note]</description>
+ <operation>
+DEFINE RoundScaleFP64(src1[63:0], imm8[7:0]) {
+ m[63:0] := FP64(imm8[7:4]) // number of fraction bits after the binary point to be preserved
+ tmp[63:0] := POW(2.0, -m) * ROUND(POW(2.0, m) * src1[63:0], imm8[3:0])
+ IF IsInf(tmp[63:0])
+ tmp[63:0] := src1[63:0]
+ FI
+ RETURN tmp[63:0]
+}
+dst[63:0] := RoundScaleFP64(b[63:0], imm8[7:0])
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VRNDSCALESD" form="xmm, xmm, xmm, imm8 {sae}" xed="VRNDSCALESD_XMMf64_MASKmskw_XMMf64_XMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_roundscale_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immtype="_MM_REDUCE"/>
+ <description>Round the lower double-precision (64-bit) floating-point element in "b" to the number of fraction bits specified by "imm8", store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst". [round_imm_note]</description>
+ <operation>
+DEFINE RoundScaleFP64(src1[63:0], imm8[7:0]) {
+ m[63:0] := FP64(imm8[7:4]) // number of fraction bits after the binary point to be preserved
+ tmp[63:0] := POW(2.0, -m) * ROUND(POW(2.0, m) * src1[63:0], imm8[3:0])
+ IF IsInf(tmp[63:0])
+ tmp[63:0] := src1[63:0]
+ FI
+ RETURN tmp[63:0]
+}
+dst[63:0] := RoundScaleFP64(b[63:0], imm8[7:0])
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VRNDSCALESD" form="xmm, xmm, xmm, imm8" xed="VRNDSCALESD_XMMf64_MASKmskw_XMMf64_XMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_roundscale_round_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immtype="_MM_REDUCE"/>
+ <parameter type="const int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Round the lower single-precision (32-bit) floating-point element in "b" to the number of fraction bits specified by "imm8", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". [round_imm_note][sae_note]</description>
+ <operation>
+DEFINE RoundScaleFP32(src1[31:0], imm8[7:0]) {
+ m[31:0] := FP32(imm8[7:4]) // number of fraction bits after the binary point to be preserved
+ tmp[31:0] := POW(FP32(2.0), -m) * ROUND(POW(FP32(2.0), m) * src1[31:0], imm8[3:0])
+ IF IsInf(tmp[31:0])
+ tmp[31:0] := src1[31:0]
+ FI
+ RETURN tmp[31:0]
+}
+IF k[0]
+ dst[31:0] := RoundScaleFP32(b[31:0], imm8[7:0])
+ELSE
+ dst[31:0] := src[31:0]
+FI
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VRNDSCALESS" form="xmm {k}, xmm, xmm, imm8 {sae}" xed="VRNDSCALESS_XMMf32_MASKmskw_XMMf32_XMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_roundscale_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immtype="_MM_REDUCE"/>
+ <description>Round the lower single-precision (32-bit) floating-point element in "b" to the number of fraction bits specified by "imm8", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". [round_imm_note]</description>
+ <operation>
+DEFINE RoundScaleFP32(src1[31:0], imm8[7:0]) {
+ m[31:0] := FP32(imm8[7:4]) // number of fraction bits after the binary point to be preserved
+ tmp[31:0] := POW(FP32(2.0), -m) * ROUND(POW(FP32(2.0), m) * src1[31:0], imm8[3:0])
+ IF IsInf(tmp[31:0])
+ tmp[31:0] := src1[31:0]
+ FI
+ RETURN tmp[31:0]
+}
+IF k[0]
+ dst[31:0] := RoundScaleFP32(b[31:0], imm8[7:0])
+ELSE
+ dst[31:0] := src[31:0]
+FI
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VRNDSCALESS" form="xmm {k}, xmm, xmm, imm8" xed="VRNDSCALESS_XMMf32_MASKmskw_XMMf32_XMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_roundscale_round_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immtype="_MM_REDUCE"/>
+ <parameter type="const int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Round the lower single-precision (32-bit) floating-point element in "b" to the number of fraction bits specified by "imm8", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". [round_imm_note][sae_note]</description>
+ <operation>
+DEFINE RoundScaleFP32(src1[31:0], imm8[7:0]) {
+ m[31:0] := FP32(imm8[7:4]) // number of fraction bits after the binary point to be preserved
+ tmp[31:0] := POW(FP32(2.0), -m) * ROUND(POW(FP32(2.0), m) * src1[31:0], imm8[3:0])
+ IF IsInf(tmp[31:0])
+ tmp[31:0] := src1[31:0]
+ FI
+ RETURN tmp[31:0]
+}
+IF k[0]
+ dst[31:0] := RoundScaleFP32(b[31:0], imm8[7:0])
+ELSE
+ dst[31:0] := 0
+FI
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VRNDSCALESS" form="xmm {z}, xmm, xmm, imm8 {sae}" xed="VRNDSCALESS_XMMf32_MASKmskw_XMMf32_XMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_roundscale_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immtype="_MM_REDUCE"/>
+ <description>Round the lower single-precision (32-bit) floating-point element in "b" to the number of fraction bits specified by "imm8", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". [round_imm_note]</description>
+ <operation>
+DEFINE RoundScaleFP32(src1[31:0], imm8[7:0]) {
+ m[31:0] := FP32(imm8[7:4]) // number of fraction bits after the binary point to be preserved
+ tmp[31:0] := POW(FP32(2.0), -m) * ROUND(POW(FP32(2.0), m) * src1[31:0], imm8[3:0])
+ IF IsInf(tmp[31:0])
+ tmp[31:0] := src1[31:0]
+ FI
+ RETURN tmp[31:0]
+}
+IF k[0]
+ dst[31:0] := RoundScaleFP32(b[31:0], imm8[7:0])
+ELSE
+ dst[31:0] := 0
+FI
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VRNDSCALESS" form="xmm {z}, xmm, xmm, imm8" xed="VRNDSCALESS_XMMf32_MASKmskw_XMMf32_XMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_roundscale_round_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immtype="_MM_REDUCE"/>
+ <parameter type="const int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Round the lower single-precision (32-bit) floating-point element in "b" to the number of fraction bits specified by "imm8", store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". [round_imm_note][sae_note]</description>
+ <operation>
+DEFINE RoundScaleFP32(src1[31:0], imm8[7:0]) {
+ m[31:0] := FP32(imm8[7:4]) // number of fraction bits after the binary point to be preserved
+ tmp[31:0] := POW(FP32(2.0), -m) * ROUND(POW(FP32(2.0), m) * src1[31:0], imm8[3:0])
+ IF IsInf(tmp[31:0])
+ tmp[31:0] := src1[31:0]
+ FI
+ RETURN tmp[31:0]
+}
+dst[31:0] := RoundScaleFP32(b[31:0], imm8[7:0])
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VRNDSCALESS" form="xmm, xmm, xmm, imm8 {sae}" xed="VRNDSCALESS_XMMf32_MASKmskw_XMMf32_XMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_roundscale_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immtype="_MM_REDUCE"/>
+ <description>Round the lower single-precision (32-bit) floating-point element in "b" to the number of fraction bits specified by "imm8", store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". [round_imm_note]</description>
+ <operation>
+DEFINE RoundScaleFP32(src1[31:0], imm8[7:0]) {
+ m[31:0] := FP32(imm8[7:4]) // number of fraction bits after the binary point to be preserved
+ tmp[31:0] := POW(FP32(2.0), -m) * ROUND(POW(FP32(2.0), m) * src1[31:0], imm8[3:0])
+ IF IsInf(tmp[31:0])
+ tmp[31:0] := src1[31:0]
+ FI
+ RETURN tmp[31:0]
+}
+dst[31:0] := RoundScaleFP32(b[31:0], imm8[7:0])
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VRNDSCALESS" form="xmm, xmm, xmm, imm8" xed="VRNDSCALESS_XMMf32_MASKmskw_XMMf32_XMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_rsqrt14_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Compute the approximate reciprocal square root of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-14.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := (1.0 / SQRT(a[i+63:i]))
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VRSQRT14PD" form="zmm {k}, zmm" xed="VRSQRT14PD_ZMMf64_MASKmskw_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_rsqrt14_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Compute the approximate reciprocal square root of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-14.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := (1.0 / SQRT(a[i+63:i]))
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VRSQRT14PD" form="zmm {z}, zmm" xed="VRSQRT14PD_ZMMf64_MASKmskw_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_rsqrt14_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Compute the approximate reciprocal square root of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst". The maximum relative error for this approximation is less than 2^-14.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ dst[i+63:i] := (1.0 / SQRT(a[i+63:i]))
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VRSQRT14PD" form="zmm, zmm" xed="VRSQRT14PD_ZMMf64_MASKmskw_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_rsqrt14_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Compute the approximate reciprocal square root of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-14.</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := (1.0 / SQRT(a[i+31:i]))
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VRSQRT14PS" form="zmm {k}, zmm" xed="VRSQRT14PS_ZMMf32_MASKmskw_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_rsqrt14_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Compute the approximate reciprocal square root of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). The maximum relative error for this approximation is less than 2^-14.</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := (1.0 / SQRT(a[i+31:i]))
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VRSQRT14PS" form="zmm {z}, zmm" xed="VRSQRT14PS_ZMMf32_MASKmskw_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_rsqrt14_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Compute the approximate reciprocal square root of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". The maximum relative error for this approximation is less than 2^-14.</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := (1.0 / SQRT(a[i+31:i]))
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VRSQRT14PS" form="zmm, zmm" xed="VRSQRT14PS_ZMMf32_MASKmskw_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_rsqrt14_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Compute the approximate reciprocal square root of the lower double-precision (64-bit) floating-point element in "b", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". The maximum relative error for this approximation is less than 2^-14.</description>
+ <operation>
+IF k[0]
+ dst[63:0] := (1.0 / SQRT(b[63:0]))
+ELSE
+ dst[63:0] := src[63:0]
+FI
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VRSQRT14SD" form="xmm {k}, xmm, xmm" xed="VRSQRT14SD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_rsqrt14_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Compute the approximate reciprocal square root of the lower double-precision (64-bit) floating-point element in "b", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst". The maximum relative error for this approximation is less than 2^-14.</description>
+ <operation>
+IF k[0]
+ dst[63:0] := (1.0 / SQRT(b[63:0]))
+ELSE
+ dst[63:0] := 0
+FI
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VRSQRT14SD" form="xmm {z}, xmm, xmm" xed="VRSQRT14SD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_rsqrt14_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Compute the approximate reciprocal square root of the lower double-precision (64-bit) floating-point element in "b", store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst". The maximum relative error for this approximation is less than 2^-14.</description>
+ <operation>
+dst[63:0] := (1.0 / SQRT(b[63:0]))
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VRSQRT14SD" form="xmm, xmm, xmm" xed="VRSQRT14SD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_rsqrt14_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Compute the approximate reciprocal square root of the lower single-precision (32-bit) floating-point element in "b", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". The maximum relative error for this approximation is less than 2^-14.</description>
+ <operation>
+IF k[0]
+ dst[31:0] := (1.0 / SQRT(b[31:0]))
+ELSE
+ dst[31:0] := src[31:0]
+FI
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VRSQRT14SS" form="xmm {k}, xmm, xmm" xed="VRSQRT14SS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_rsqrt14_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Compute the approximate reciprocal square root of the lower single-precision (32-bit) floating-point element in "b", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst". The maximum relative error for this approximation is less than 2^-14.</description>
+ <operation>
+IF k[0]
+ dst[31:0] := (1.0 / SQRT(b[31:0]))
+ELSE
+ dst[31:0] := 0
+FI
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VRSQRT14SS" form="xmm {z}, xmm, xmm" xed="VRSQRT14SS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_rsqrt14_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Compute the approximate reciprocal square root of the lower single-precision (32-bit) floating-point element in "b", store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". The maximum relative error for this approximation is less than 2^-14.</description>
+ <operation>
+dst[31:0] := (1.0 / SQRT(b[31:0]))
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VRSQRT14SS" form="xmm, xmm, xmm" xed="VRSQRT14SS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_scalef_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <description>Scale the packed double-precision (64-bit) floating-point elements in "a" using values from "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>DEFINE SCALE(src1, src2) {
+ IF (src2 == NaN)
+ IF (src2 == SNaN)
+ RETURN QNAN(src2)
+ FI
+ ELSE IF (src1 == NaN)
+ IF (src1 == SNaN)
+ RETURN QNAN(src1)
+ FI
+ IF (src2 != INF)
+ RETURN QNAN(src1)
+ FI
+ ELSE
+ tmp_src2 := src2
+ tmp_src1 := src1
+ IF (IS_DENORMAL(src2) AND MXCSR.DAZ)
+ tmp_src2 := 0
+ FI
+ IF (IS_DENORMAL(src1) AND MXCSR.DAZ)
+ tmp_src1 := 0
+ FI
+ FI
+ dst[63:0] := tmp_src1[63:0] * POW(2.0, FLOOR(tmp_src2[63:0]))
+ RETURN dst[63:0]
+}
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+    dst[i+63:i] := SCALE(a[i+63:i], b[i+63:i])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VSCALEFPD" form="zmm {k}, zmm, zmm" xed="VSCALEFPD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_scalef_round_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Scale the packed double-precision (64-bit) floating-point elements in "a" using values from "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+ [round_note]</description>
+ <operation>DEFINE SCALE(src1, src2) {
+ IF (src2 == NaN)
+ IF (src2 == SNaN)
+ RETURN QNAN(src2)
+ FI
+ ELSE IF (src1 == NaN)
+ IF (src1 == SNaN)
+ RETURN QNAN(src1)
+ FI
+ IF (src2 != INF)
+ RETURN QNAN(src1)
+ FI
+ ELSE
+ tmp_src2 := src2
+ tmp_src1 := src1
+ IF (IS_DENORMAL(src2) AND MXCSR.DAZ)
+ tmp_src2 := 0
+ FI
+ IF (IS_DENORMAL(src1) AND MXCSR.DAZ)
+ tmp_src1 := 0
+ FI
+ FI
+ dst[63:0] := tmp_src1[63:0] * POW(2.0, FLOOR(tmp_src2[63:0]))
+ RETURN dst[63:0]
+}
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+    dst[i+63:i] := SCALE(a[i+63:i], b[i+63:i])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VSCALEFPD" form="zmm {k}, zmm, zmm {er}" xed="VSCALEFPD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_scalef_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <description>Scale the packed double-precision (64-bit) floating-point elements in "a" using values from "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>DEFINE SCALE(src1, src2) {
+ IF (src2 == NaN)
+ IF (src2 == SNaN)
+ RETURN QNAN(src2)
+ FI
+ ELSE IF (src1 == NaN)
+ IF (src1 == SNaN)
+ RETURN QNAN(src1)
+ FI
+ IF (src2 != INF)
+ RETURN QNAN(src1)
+ FI
+ ELSE
+ tmp_src2 := src2
+ tmp_src1 := src1
+ IF (IS_DENORMAL(src2) AND MXCSR.DAZ)
+ tmp_src2 := 0
+ FI
+ IF (IS_DENORMAL(src1) AND MXCSR.DAZ)
+ tmp_src1 := 0
+ FI
+ FI
+ dst[63:0] := tmp_src1[63:0] * POW(2.0, FLOOR(tmp_src2[63:0]))
+ RETURN dst[63:0]
+}
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+    dst[i+63:i] := SCALE(a[i+63:i], b[i+63:i])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VSCALEFPD" form="zmm {z}, zmm, zmm" xed="VSCALEFPD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_scalef_round_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Scale the packed double-precision (64-bit) floating-point elements in "a" using values from "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+ [round_note]</description>
+ <operation>DEFINE SCALE(src1, src2) {
+ IF (src2 == NaN)
+ IF (src2 == SNaN)
+ RETURN QNAN(src2)
+ FI
+ ELSE IF (src1 == NaN)
+ IF (src1 == SNaN)
+ RETURN QNAN(src1)
+ FI
+ IF (src2 != INF)
+ RETURN QNAN(src1)
+ FI
+ ELSE
+ tmp_src2 := src2
+ tmp_src1 := src1
+ IF (IS_DENORMAL(src2) AND MXCSR.DAZ)
+ tmp_src2 := 0
+ FI
+ IF (IS_DENORMAL(src1) AND MXCSR.DAZ)
+ tmp_src1 := 0
+ FI
+ FI
+ dst[63:0] := tmp_src1[63:0] * POW(2.0, FLOOR(tmp_src2[63:0]))
+ RETURN dst[63:0]
+}
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+    dst[i+63:i] := SCALE(a[i+63:i], b[i+63:i])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VSCALEFPD" form="zmm {z}, zmm, zmm {er}" xed="VSCALEFPD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_scalef_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <description>Scale the packed double-precision (64-bit) floating-point elements in "a" using values from "b", and store the results in "dst".</description>
+ <operation>DEFINE SCALE(src1, src2) {
+ IF (src2 == NaN)
+ IF (src2 == SNaN)
+ RETURN QNAN(src2)
+ FI
+ ELSE IF (src1 == NaN)
+ IF (src1 == SNaN)
+ RETURN QNAN(src1)
+ FI
+ IF (src2 != INF)
+ RETURN QNAN(src1)
+ FI
+ ELSE
+ tmp_src2 := src2
+ tmp_src1 := src1
+ IF (IS_DENORMAL(src2) AND MXCSR.DAZ)
+ tmp_src2 := 0
+ FI
+ IF (IS_DENORMAL(src1) AND MXCSR.DAZ)
+ tmp_src1 := 0
+ FI
+ FI
+ dst[63:0] := tmp_src1[63:0] * POW(2.0, FLOOR(tmp_src2[63:0]))
+ RETURN dst[63:0]
+}
+FOR j := 0 to 7
+ i := j*64
+  dst[i+63:i] := SCALE(a[i+63:i], b[i+63:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VSCALEFPD" form="zmm, zmm, zmm" xed="VSCALEFPD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_scalef_round_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Scale the packed double-precision (64-bit) floating-point elements in "a" using values from "b", and store the results in "dst".
+ [round_note]</description>
+ <operation>DEFINE SCALE(src1, src2) {
+ IF (src2 == NaN)
+ IF (src2 == SNaN)
+ RETURN QNAN(src2)
+ FI
+ ELSE IF (src1 == NaN)
+ IF (src1 == SNaN)
+ RETURN QNAN(src1)
+ FI
+ IF (src2 != INF)
+ RETURN QNAN(src1)
+ FI
+ ELSE
+ tmp_src2 := src2
+ tmp_src1 := src1
+ IF (IS_DENORMAL(src2) AND MXCSR.DAZ)
+ tmp_src2 := 0
+ FI
+ IF (IS_DENORMAL(src1) AND MXCSR.DAZ)
+ tmp_src1 := 0
+ FI
+ FI
+ dst[63:0] := tmp_src1[63:0] * POW(2.0, FLOOR(tmp_src2[63:0]))
+ RETURN dst[63:0]
+}
+FOR j := 0 to 7
+ i := j*64
+  dst[i+63:i] := SCALE(a[i+63:i], b[i+63:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VSCALEFPD" form="zmm, zmm, zmm {er}" xed="VSCALEFPD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_scalef_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <description>Scale the packed single-precision (32-bit) floating-point elements in "a" using values from "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>DEFINE SCALE(src1, src2) {
+ IF (src2 == NaN)
+ IF (src2 == SNaN)
+ RETURN QNAN(src2)
+ FI
+ ELSE IF (src1 == NaN)
+ IF (src1 == SNaN)
+ RETURN QNAN(src1)
+ FI
+ IF (src2 != INF)
+ RETURN QNAN(src1)
+ FI
+ ELSE
+ tmp_src2 := src2
+ tmp_src1 := src1
+ IF (IS_DENORMAL(src2) AND MXCSR.DAZ)
+ tmp_src2 := 0
+ FI
+ IF (IS_DENORMAL(src1) AND MXCSR.DAZ)
+ tmp_src1 := 0
+ FI
+ FI
+ dst[31:0] := tmp_src1[31:0] * POW(2.0, FLOOR(tmp_src2[31:0]))
+ RETURN dst[31:0]
+}
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+    dst[i+31:i] := SCALE(a[i+31:i], b[i+31:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VSCALEFPS" form="zmm {k}, zmm, zmm" xed="VSCALEFPS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_scalef_round_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Scale the packed single-precision (32-bit) floating-point elements in "a" using values from "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+ [round_note]</description>
+ <operation>DEFINE SCALE(src1, src2) {
+ IF (src2 == NaN)
+ IF (src2 == SNaN)
+ RETURN QNAN(src2)
+ FI
+ ELSE IF (src1 == NaN)
+ IF (src1 == SNaN)
+ RETURN QNAN(src1)
+ FI
+ IF (src2 != INF)
+ RETURN QNAN(src1)
+ FI
+ ELSE
+ tmp_src2 := src2
+ tmp_src1 := src1
+ IF (IS_DENORMAL(src2) AND MXCSR.DAZ)
+ tmp_src2 := 0
+ FI
+ IF (IS_DENORMAL(src1) AND MXCSR.DAZ)
+ tmp_src1 := 0
+ FI
+ FI
+ dst[31:0] := tmp_src1[31:0] * POW(2.0, FLOOR(tmp_src2[31:0]))
+ RETURN dst[31:0]
+}
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+    dst[i+31:i] := SCALE(a[i+31:i], b[i+31:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VSCALEFPS" form="zmm {k}, zmm, zmm {er}" xed="VSCALEFPS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_scalef_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <description>Scale the packed single-precision (32-bit) floating-point elements in "a" using values from "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>DEFINE SCALE(src1, src2) {
+ IF (src2 == NaN)
+ IF (src2 == SNaN)
+ RETURN QNAN(src2)
+ FI
+ ELSE IF (src1 == NaN)
+ IF (src1 == SNaN)
+ RETURN QNAN(src1)
+ FI
+ IF (src2 != INF)
+ RETURN QNAN(src1)
+ FI
+ ELSE
+ tmp_src2 := src2
+ tmp_src1 := src1
+ IF (IS_DENORMAL(src2) AND MXCSR.DAZ)
+ tmp_src2 := 0
+ FI
+ IF (IS_DENORMAL(src1) AND MXCSR.DAZ)
+ tmp_src1 := 0
+ FI
+ FI
+ dst[31:0] := tmp_src1[31:0] * POW(2.0, FLOOR(tmp_src2[31:0]))
+ RETURN dst[31:0]
+}
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+    dst[i+31:i] := SCALE(a[i+31:i], b[i+31:i])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VSCALEFPS" form="zmm {z}, zmm, zmm" xed="VSCALEFPS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_scalef_round_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Scale the packed single-precision (32-bit) floating-point elements in "a" using values from "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+ [round_note]</description>
+ <operation>DEFINE SCALE(src1, src2) {
+ IF (src2 == NaN)
+ IF (src2 == SNaN)
+ RETURN QNAN(src2)
+ FI
+ ELSE IF (src1 == NaN)
+ IF (src1 == SNaN)
+ RETURN QNAN(src1)
+ FI
+ IF (src2 != INF)
+ RETURN QNAN(src1)
+ FI
+ ELSE
+ tmp_src2 := src2
+ tmp_src1 := src1
+ IF (IS_DENORMAL(src2) AND MXCSR.DAZ)
+ tmp_src2 := 0
+ FI
+ IF (IS_DENORMAL(src1) AND MXCSR.DAZ)
+ tmp_src1 := 0
+ FI
+ FI
+ dst[31:0] := tmp_src1[31:0] * POW(2.0, FLOOR(tmp_src2[31:0]))
+ RETURN dst[31:0]
+}
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+    dst[i+31:i] := SCALE(a[i+31:i], b[i+31:i])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VSCALEFPS" form="zmm {z}, zmm, zmm {er}" xed="VSCALEFPS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_scalef_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <description>Scale the packed single-precision (32-bit) floating-point elements in "a" using values from "b", and store the results in "dst".</description>
+ <operation>DEFINE SCALE(src1, src2) {
+ IF (src2 == NaN)
+ IF (src2 == SNaN)
+ RETURN QNAN(src2)
+ FI
+ ELSE IF (src1 == NaN)
+ IF (src1 == SNaN)
+ RETURN QNAN(src1)
+ FI
+ IF (src2 != INF)
+ RETURN QNAN(src1)
+ FI
+ ELSE
+ tmp_src2 := src2
+ tmp_src1 := src1
+ IF (IS_DENORMAL(src2) AND MXCSR.DAZ)
+ tmp_src2 := 0
+ FI
+ IF (IS_DENORMAL(src1) AND MXCSR.DAZ)
+ tmp_src1 := 0
+ FI
+ FI
+ dst[31:0] := tmp_src1[31:0] * POW(2.0, FLOOR(tmp_src2[31:0]))
+ RETURN dst[31:0]
+}
+FOR j := 0 to 15
+ i := j*32
+  dst[i+31:i] := SCALE(a[i+31:i], b[i+31:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VSCALEFPS" form="zmm, zmm, zmm" xed="VSCALEFPS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_scalef_round_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Scale the packed single-precision (32-bit) floating-point elements in "a" using values from "b", and store the results in "dst".
+ [round_note]</description>
+ <operation>DEFINE SCALE(src1, src2) {
+ IF (src2 == NaN)
+ IF (src2 == SNaN)
+ RETURN QNAN(src2)
+ FI
+ ELSE IF (src1 == NaN)
+ IF (src1 == SNaN)
+ RETURN QNAN(src1)
+ FI
+ IF (src2 != INF)
+ RETURN QNAN(src1)
+ FI
+ ELSE
+ tmp_src2 := src2
+ tmp_src1 := src1
+ IF (IS_DENORMAL(src2) AND MXCSR.DAZ)
+ tmp_src2 := 0
+ FI
+ IF (IS_DENORMAL(src1) AND MXCSR.DAZ)
+ tmp_src1 := 0
+ FI
+ FI
+ dst[31:0] := tmp_src1[31:0] * POW(2.0, FLOOR(tmp_src2[31:0]))
+ RETURN dst[31:0]
+}
+FOR j := 0 to 15
+ i := j*32
+  dst[i+31:i] := SCALE(a[i+31:i], b[i+31:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VSCALEFPS" form="zmm, zmm, zmm {er}" xed="VSCALEFPS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_scalef_round_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Scale the packed double-precision (64-bit) floating-point elements in "a" using values from "b", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst".
+ [round_note]</description>
+ <operation>DEFINE SCALE(src1, src2) {
+ IF (src2 == NaN)
+ IF (src2 == SNaN)
+ RETURN QNAN(src2)
+ FI
+ ELSE IF (src1 == NaN)
+ IF (src1 == SNaN)
+ RETURN QNAN(src1)
+ FI
+ IF (src2 != INF)
+ RETURN QNAN(src1)
+ FI
+ ELSE
+ tmp_src2 := src2
+ tmp_src1 := src1
+ IF (IS_DENORMAL(src2) AND MXCSR.DAZ)
+ tmp_src2 := 0
+ FI
+ IF (IS_DENORMAL(src1) AND MXCSR.DAZ)
+ tmp_src1 := 0
+ FI
+ FI
+ dst[63:0] := tmp_src1[63:0] * POW(2.0, FLOOR(tmp_src2[63:0]))
+ RETURN dst[63:0]
+}
+IF k[0]
+ dst[63:0] := SCALE(a[63:0], b[63:0])
+ELSE
+ dst[63:0] := src[63:0]
+FI
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VSCALEFSD" form="xmm {k}, xmm, xmm {er}" xed="VSCALEFSD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_scalef_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Scale the packed double-precision (64-bit) floating-point elements in "a" using values from "b", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst".</description>
+ <operation>DEFINE SCALE(src1, src2) {
+ IF (src2 == NaN)
+ IF (src2 == SNaN)
+ RETURN QNAN(src2)
+ FI
+ ELSE IF (src1 == NaN)
+ IF (src1 == SNaN)
+ RETURN QNAN(src1)
+ FI
+ IF (src2 != INF)
+ RETURN QNAN(src1)
+ FI
+ ELSE
+ tmp_src2 := src2
+ tmp_src1 := src1
+ IF (IS_DENORMAL(src2) AND MXCSR.DAZ)
+ tmp_src2 := 0
+ FI
+ IF (IS_DENORMAL(src1) AND MXCSR.DAZ)
+ tmp_src1 := 0
+ FI
+ FI
+ dst[63:0] := tmp_src1[63:0] * POW(2.0, FLOOR(tmp_src2[63:0]))
+ RETURN dst[63:0]
+}
+IF k[0]
+ dst[63:0] := SCALE(a[63:0], b[63:0])
+ELSE
+ dst[63:0] := src[63:0]
+FI
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VSCALEFSD" form="xmm {k}, xmm, xmm" xed="VSCALEFSD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_scalef_round_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Scale the packed double-precision (64-bit) floating-point elements in "a" using values from "b", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst".
+ [round_note]</description>
+ <operation>DEFINE SCALE(src1, src2) {
+ IF (src2 == NaN)
+ IF (src2 == SNaN)
+ RETURN QNAN(src2)
+ FI
+ ELSE IF (src1 == NaN)
+ IF (src1 == SNaN)
+ RETURN QNAN(src1)
+ FI
+ IF (src2 != INF)
+ RETURN QNAN(src1)
+ FI
+ ELSE
+ tmp_src2 := src2
+ tmp_src1 := src1
+ IF (IS_DENORMAL(src2) AND MXCSR.DAZ)
+ tmp_src2 := 0
+ FI
+ IF (IS_DENORMAL(src1) AND MXCSR.DAZ)
+ tmp_src1 := 0
+ FI
+ FI
+ dst[63:0] := tmp_src1[63:0] * POW(2.0, FLOOR(tmp_src2[63:0]))
+ RETURN dst[63:0]
+}
+IF k[0]
+ dst[63:0] := SCALE(a[63:0], b[63:0])
+ELSE
+ dst[63:0] := 0
+FI
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VSCALEFSD" form="xmm {z}, xmm, xmm {er}" xed="VSCALEFSD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_scalef_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Scale the packed double-precision (64-bit) floating-point elements in "a" using values from "b", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst".</description>
+ <operation>DEFINE SCALE(src1, src2) {
+ IF (src2 == NaN)
+ IF (src2 == SNaN)
+ RETURN QNAN(src2)
+ FI
+ ELSE IF (src1 == NaN)
+ IF (src1 == SNaN)
+ RETURN QNAN(src1)
+ FI
+ IF (src2 != INF)
+ RETURN QNAN(src1)
+ FI
+ ELSE
+ tmp_src2 := src2
+ tmp_src1 := src1
+ IF (IS_DENORMAL(src2) AND MXCSR.DAZ)
+ tmp_src2 := 0
+ FI
+ IF (IS_DENORMAL(src1) AND MXCSR.DAZ)
+ tmp_src1 := 0
+ FI
+ FI
+ dst[63:0] := tmp_src1[63:0] * POW(2.0, FLOOR(tmp_src2[63:0]))
+ RETURN dst[63:0]
+}
+IF k[0]
+ dst[63:0] := SCALE(a[63:0], b[63:0])
+ELSE
+ dst[63:0] := 0
+FI
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VSCALEFSD" form="xmm {z}, xmm, xmm" xed="VSCALEFSD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_scalef_round_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Scale the packed double-precision (64-bit) floating-point elements in "a" using values from "b", store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst".
+ [round_note]</description>
+ <operation>DEFINE SCALE(src1, src2) {
+ IF (src2 == NaN)
+ IF (src2 == SNaN)
+ RETURN QNAN(src2)
+ FI
+ ELSE IF (src1 == NaN)
+ IF (src1 == SNaN)
+ RETURN QNAN(src1)
+ FI
+ IF (src2 != INF)
+ RETURN QNAN(src1)
+ FI
+ ELSE
+ tmp_src2 := src2
+ tmp_src1 := src1
+ IF (IS_DENORMAL(src2) AND MXCSR.DAZ)
+ tmp_src2 := 0
+ FI
+ IF (IS_DENORMAL(src1) AND MXCSR.DAZ)
+ tmp_src1 := 0
+ FI
+ FI
+ dst[63:0] := tmp_src1[63:0] * POW(2.0, FLOOR(tmp_src2[63:0]))
+ RETURN dst[63:0]
+}
+dst[63:0] := SCALE(a[63:0], b[63:0])
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VSCALEFSD" form="xmm, xmm, xmm {er}" xed="VSCALEFSD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_scalef_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Scale the packed double-precision (64-bit) floating-point elements in "a" using values from "b", store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst".</description>
+ <operation>DEFINE SCALE(src1, src2) {
+ IF (src2 == NaN)
+ IF (src2 == SNaN)
+ RETURN QNAN(src2)
+ FI
+ ELSE IF (src1 == NaN)
+ IF (src1 == SNaN)
+ RETURN QNAN(src1)
+ FI
+ IF (src2 != INF)
+ RETURN QNAN(src1)
+ FI
+ ELSE
+ tmp_src2 := src2
+ tmp_src1 := src1
+ IF (IS_DENORMAL(src2) AND MXCSR.DAZ)
+ tmp_src2 := 0
+ FI
+ IF (IS_DENORMAL(src1) AND MXCSR.DAZ)
+ tmp_src1 := 0
+ FI
+ FI
+ dst[63:0] := tmp_src1[63:0] * POW(2.0, FLOOR(tmp_src2[63:0]))
+ RETURN dst[63:0]
+}
+dst[63:0] := SCALE(a[63:0], b[63:0])
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VSCALEFSD" form="xmm, xmm, xmm" xed="VSCALEFSD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_scalef_round_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Scale the packed single-precision (32-bit) floating-point elements in "a" using values from "b", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst".
+ [round_note]</description>
+ <operation>DEFINE SCALE(src1, src2) {
+ IF (src2 == NaN)
+ IF (src2 == SNaN)
+ RETURN QNAN(src2)
+ FI
+ ELSE IF (src1 == NaN)
+ IF (src1 == SNaN)
+ RETURN QNAN(src1)
+ FI
+ IF (src2 != INF)
+ RETURN QNAN(src1)
+ FI
+ ELSE
+ tmp_src2 := src2
+ tmp_src1 := src1
+ IF (IS_DENORMAL(src2) AND MXCSR.DAZ)
+ tmp_src2 := 0
+ FI
+ IF (IS_DENORMAL(src1) AND MXCSR.DAZ)
+ tmp_src1 := 0
+ FI
+ FI
+ dst[31:0] := tmp_src1[31:0] * POW(2.0, FLOOR(tmp_src2[31:0]))
+	RETURN dst[31:0]
+}
+IF k[0]
+ dst[31:0] := SCALE(a[31:0], b[31:0])
+ELSE
+ dst[31:0] := src[31:0]
+FI
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VSCALEFSS" form="xmm {k}, xmm, xmm {er}" xed="VSCALEFSS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_scalef_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Scale the packed single-precision (32-bit) floating-point elements in "a" using values from "b", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst".</description>
+ <operation>DEFINE SCALE(src1, src2) {
+ IF (src2 == NaN)
+ IF (src2 == SNaN)
+ RETURN QNAN(src2)
+ FI
+ ELSE IF (src1 == NaN)
+ IF (src1 == SNaN)
+ RETURN QNAN(src1)
+ FI
+ IF (src2 != INF)
+ RETURN QNAN(src1)
+ FI
+ ELSE
+ tmp_src2 := src2
+ tmp_src1 := src1
+ IF (IS_DENORMAL(src2) AND MXCSR.DAZ)
+ tmp_src2 := 0
+ FI
+ IF (IS_DENORMAL(src1) AND MXCSR.DAZ)
+ tmp_src1 := 0
+ FI
+ FI
+ dst[31:0] := tmp_src1[31:0] * POW(2.0, FLOOR(tmp_src2[31:0]))
+	RETURN dst[31:0]
+}
+IF k[0]
+ dst[31:0] := SCALE(a[31:0], b[31:0])
+ELSE
+ dst[31:0] := src[31:0]
+FI
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VSCALEFSS" form="xmm {k}, xmm, xmm" xed="VSCALEFSS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_scalef_round_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Scale the packed single-precision (32-bit) floating-point elements in "a" using values from "b", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst".
+ [round_note]</description>
+ <operation>DEFINE SCALE(src1, src2) {
+ IF (src2 == NaN)
+ IF (src2 == SNaN)
+ RETURN QNAN(src2)
+ FI
+ ELSE IF (src1 == NaN)
+ IF (src1 == SNaN)
+ RETURN QNAN(src1)
+ FI
+ IF (src2 != INF)
+ RETURN QNAN(src1)
+ FI
+ ELSE
+ tmp_src2 := src2
+ tmp_src1 := src1
+ IF (IS_DENORMAL(src2) AND MXCSR.DAZ)
+ tmp_src2 := 0
+ FI
+ IF (IS_DENORMAL(src1) AND MXCSR.DAZ)
+ tmp_src1 := 0
+ FI
+ FI
+ dst[31:0] := tmp_src1[31:0] * POW(2.0, FLOOR(tmp_src2[31:0]))
+	RETURN dst[31:0]
+}
+IF k[0]
+ dst[31:0] := SCALE(a[31:0], b[31:0])
+ELSE
+ dst[31:0] := 0
+FI
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VSCALEFSS" form="xmm {z}, xmm, xmm {er}" xed="VSCALEFSS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_scalef_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Scale the packed single-precision (32-bit) floating-point elements in "a" using values from "b", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst".</description>
+ <operation>DEFINE SCALE(src1, src2) {
+ IF (src2 == NaN)
+ IF (src2 == SNaN)
+ RETURN QNAN(src2)
+ FI
+ ELSE IF (src1 == NaN)
+ IF (src1 == SNaN)
+ RETURN QNAN(src1)
+ FI
+ IF (src2 != INF)
+ RETURN QNAN(src1)
+ FI
+ ELSE
+ tmp_src2 := src2
+ tmp_src1 := src1
+ IF (IS_DENORMAL(src2) AND MXCSR.DAZ)
+ tmp_src2 := 0
+ FI
+ IF (IS_DENORMAL(src1) AND MXCSR.DAZ)
+ tmp_src1 := 0
+ FI
+ FI
+ dst[31:0] := tmp_src1[31:0] * POW(2.0, FLOOR(tmp_src2[31:0]))
+	RETURN dst[31:0]
+}
+IF k[0]
+ dst[31:0] := SCALE(a[31:0], b[31:0])
+ELSE
+ dst[31:0] := 0
+FI
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VSCALEFSS" form="xmm {z}, xmm, xmm" xed="VSCALEFSS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_scalef_round_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Scale the packed single-precision (32-bit) floating-point elements in "a" using values from "b", store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst".
+ [round_note]</description>
+ <operation>DEFINE SCALE(src1, src2) {
+ IF (src2 == NaN)
+ IF (src2 == SNaN)
+ RETURN QNAN(src2)
+ FI
+ ELSE IF (src1 == NaN)
+ IF (src1 == SNaN)
+ RETURN QNAN(src1)
+ FI
+ IF (src2 != INF)
+ RETURN QNAN(src1)
+ FI
+ ELSE
+ tmp_src2 := src2
+ tmp_src1 := src1
+ IF (IS_DENORMAL(src2) AND MXCSR.DAZ)
+ tmp_src2 := 0
+ FI
+ IF (IS_DENORMAL(src1) AND MXCSR.DAZ)
+ tmp_src1 := 0
+ FI
+ FI
+ dst[31:0] := tmp_src1[31:0] * POW(2.0, FLOOR(tmp_src2[31:0]))
+	RETURN dst[31:0]
+}
+dst[31:0] := SCALE(a[31:0], b[31:0])
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VSCALEFSS" form="xmm, xmm, xmm {er}" xed="VSCALEFSS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_scalef_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Scale the packed single-precision (32-bit) floating-point elements in "a" using values from "b", store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst".</description>
+ <operation>DEFINE SCALE(src1, src2) {
+ IF (src2 == NaN)
+ IF (src2 == SNaN)
+ RETURN QNAN(src2)
+ FI
+ ELSE IF (src1 == NaN)
+ IF (src1 == SNaN)
+ RETURN QNAN(src1)
+ FI
+ IF (src2 != INF)
+ RETURN QNAN(src1)
+ FI
+ ELSE
+ tmp_src2 := src2
+ tmp_src1 := src1
+ IF (IS_DENORMAL(src2) AND MXCSR.DAZ)
+ tmp_src2 := 0
+ FI
+ IF (IS_DENORMAL(src1) AND MXCSR.DAZ)
+ tmp_src1 := 0
+ FI
+ FI
+ dst[31:0] := tmp_src1[31:0] * POW(2.0, FLOOR(tmp_src2[31:0]))
+	RETURN dst[31:0]
+}
+dst[31:0] := SCALE(a[31:0], b[31:0])
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VSCALEFSS" form="xmm, xmm, xmm" xed="VSCALEFSS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_i32scatter_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="base_addr" etype="FP64"/>
+ <parameter type="__m256i" varname="vindex" etype="SI32"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <description>Scatter double-precision (64-bit) floating-point elements from "a" into memory using 32-bit indices. 64-bit elements are stored at addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). "scale" should be 1, 2, 4 or 8.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ m := j*32
+ addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
+ MEM[addr+63:addr] := a[i+63:i]
+ENDFOR
+ </operation>
+ <instruction name="VSCATTERDPD" form="vm32y, zmm" xed="VSCATTERDPD_MEMf64_MASKmskw_ZMMf64_AVX512_VL512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_i32scatter_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="base_addr" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="vindex" etype="SI32"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <description>Scatter double-precision (64-bit) floating-point elements from "a" into memory using 32-bit indices. 64-bit elements are stored at addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale") subject to mask "k" (elements are not stored when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ m := j*32
+ IF k[j]
+ addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
+ MEM[addr+63:addr] := a[i+63:i]
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VSCATTERDPD" form="vm32y {k}, zmm" xed="VSCATTERDPD_MEMf64_MASKmskw_ZMMf64_AVX512_VL512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_i64scatter_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="base_addr" etype="FP64"/>
+ <parameter type="__m512i" varname="vindex" etype="SI64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <description>Scatter double-precision (64-bit) floating-point elements from "a" into memory using 64-bit indices. 64-bit elements are stored at addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). "scale" should be 1, 2, 4 or 8.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ m := j*64
+ addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8
+ MEM[addr+63:addr] := a[i+63:i]
+ENDFOR
+ </operation>
+	<instruction name="VSCATTERQPD" form="vm64z, zmm" xed="VSCATTERQPD_MEMf64_MASKmskw_ZMMf64_AVX512_VL512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_i64scatter_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="base_addr" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="vindex" etype="SI64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <description>Scatter double-precision (64-bit) floating-point elements from "a" into memory using 64-bit indices. 64-bit elements are stored at addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale") subject to mask "k" (elements are not stored when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ m := j*64
+ IF k[j]
+ addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8
+ MEM[addr+63:addr] := a[i+63:i]
+ FI
+ENDFOR
+ </operation>
+	<instruction name="VSCATTERQPD" form="vm64z {k}, zmm" xed="VSCATTERQPD_MEMf64_MASKmskw_ZMMf64_AVX512_VL512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_i64scatter_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="base_addr" etype="FP32"/>
+ <parameter type="__m512i" varname="vindex" etype="SI64"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+	<description>Scatter single-precision (32-bit) floating-point elements from "a" into memory using 64-bit indices. 32-bit elements are stored at addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). "scale" should be 1, 2, 4 or 8.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ m := j*64
+ addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8
+ MEM[addr+31:addr] := a[i+31:i]
+ENDFOR
+ </operation>
+	<instruction name="VSCATTERQPS" form="vm64z, ymm" xed="VSCATTERQPS_MEMf32_MASKmskw_YMMf32_AVX512_VL512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_i64scatter_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="base_addr" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="vindex" etype="SI64"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <description>Scatter single-precision (32-bit) floating-point elements from "a" into memory using 64-bit indices. 32-bit elements are stored at addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale") subject to mask "k" (elements are not stored when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ m := j*64
+ IF k[j]
+ addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8
+ MEM[addr+31:addr] := a[i+31:i]
+ FI
+ENDFOR
+ </operation>
+	<instruction name="VSCATTERQPS" form="vm64z {k}, ymm" xed="VSCATTERQPS_MEMf32_MASKmskw_YMMf32_AVX512_VL512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_shuffle_f32x4">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shuffle 128-bits (composed of 4 single-precision (32-bit) floating-point elements) selected by "imm8" from "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE SELECT4(src, control) {
+ CASE(control[1:0]) OF
+ 0: tmp[127:0] := src[127:0]
+ 1: tmp[127:0] := src[255:128]
+ 2: tmp[127:0] := src[383:256]
+ 3: tmp[127:0] := src[511:384]
+ ESAC
+ RETURN tmp[127:0]
+}
+tmp_dst[127:0] := SELECT4(a[511:0], imm8[1:0])
+tmp_dst[255:128] := SELECT4(a[511:0], imm8[3:2])
+tmp_dst[383:256] := SELECT4(b[511:0], imm8[5:4])
+tmp_dst[511:384] := SELECT4(b[511:0], imm8[7:6])
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := tmp_dst[i+31:i]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VSHUFF32X4" form="zmm {k}, zmm, zmm, imm8" xed="VSHUFF32X4_ZMMf32_MASKmskw_ZMMf32_ZMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_shuffle_f32x4">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shuffle 128-bits (composed of 4 single-precision (32-bit) floating-point elements) selected by "imm8" from "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE SELECT4(src, control) {
+ CASE(control[1:0]) OF
+ 0: tmp[127:0] := src[127:0]
+ 1: tmp[127:0] := src[255:128]
+ 2: tmp[127:0] := src[383:256]
+ 3: tmp[127:0] := src[511:384]
+ ESAC
+ RETURN tmp[127:0]
+}
+tmp_dst[127:0] := SELECT4(a[511:0], imm8[1:0])
+tmp_dst[255:128] := SELECT4(a[511:0], imm8[3:2])
+tmp_dst[383:256] := SELECT4(b[511:0], imm8[5:4])
+tmp_dst[511:384] := SELECT4(b[511:0], imm8[7:6])
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := tmp_dst[i+31:i]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VSHUFF32X4" form="zmm {z}, zmm, zmm, imm8" xed="VSHUFF32X4_ZMMf32_MASKmskw_ZMMf32_ZMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_shuffle_f32x4">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shuffle 128-bits (composed of 4 single-precision (32-bit) floating-point elements) selected by "imm8" from "a" and "b", and store the results in "dst".</description>
+ <operation>
+DEFINE SELECT4(src, control) {
+ CASE(control[1:0]) OF
+ 0: tmp[127:0] := src[127:0]
+ 1: tmp[127:0] := src[255:128]
+ 2: tmp[127:0] := src[383:256]
+ 3: tmp[127:0] := src[511:384]
+ ESAC
+ RETURN tmp[127:0]
+}
+dst[127:0] := SELECT4(a[511:0], imm8[1:0])
+dst[255:128] := SELECT4(a[511:0], imm8[3:2])
+dst[383:256] := SELECT4(b[511:0], imm8[5:4])
+dst[511:384] := SELECT4(b[511:0], imm8[7:6])
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VSHUFF32X4" form="zmm, zmm, zmm, imm8" xed="VSHUFF32X4_ZMMf32_MASKmskw_ZMMf32_ZMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_shuffle_f64x2">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shuffle 128-bits (composed of 2 double-precision (64-bit) floating-point elements) selected by "imm8" from "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE SELECT4(src, control) {
+ CASE(control[1:0]) OF
+ 0: tmp[127:0] := src[127:0]
+ 1: tmp[127:0] := src[255:128]
+ 2: tmp[127:0] := src[383:256]
+ 3: tmp[127:0] := src[511:384]
+ ESAC
+ RETURN tmp[127:0]
+}
+tmp_dst[127:0] := SELECT4(a[511:0], imm8[1:0])
+tmp_dst[255:128] := SELECT4(a[511:0], imm8[3:2])
+tmp_dst[383:256] := SELECT4(b[511:0], imm8[5:4])
+tmp_dst[511:384] := SELECT4(b[511:0], imm8[7:6])
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := tmp_dst[i+63:i]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VSHUFF64X2" form="zmm {k}, zmm, zmm, imm8" xed="VSHUFF64X2_ZMMf64_MASKmskw_ZMMf64_ZMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_shuffle_f64x2">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shuffle 128-bits (composed of 2 double-precision (64-bit) floating-point elements) selected by "imm8" from "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE SELECT4(src, control) {
+ CASE(control[1:0]) OF
+ 0: tmp[127:0] := src[127:0]
+ 1: tmp[127:0] := src[255:128]
+ 2: tmp[127:0] := src[383:256]
+ 3: tmp[127:0] := src[511:384]
+ ESAC
+ RETURN tmp[127:0]
+}
+tmp_dst[127:0] := SELECT4(a[511:0], imm8[1:0])
+tmp_dst[255:128] := SELECT4(a[511:0], imm8[3:2])
+tmp_dst[383:256] := SELECT4(b[511:0], imm8[5:4])
+tmp_dst[511:384] := SELECT4(b[511:0], imm8[7:6])
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := tmp_dst[i+63:i]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VSHUFF64X2" form="zmm {z}, zmm, zmm, imm8" xed="VSHUFF64X2_ZMMf64_MASKmskw_ZMMf64_ZMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_shuffle_f64x2">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shuffle 128-bits (composed of 2 double-precision (64-bit) floating-point elements) selected by "imm8" from "a" and "b", and store the results in "dst".</description>
+ <operation>
+DEFINE SELECT4(src, control) {
+ CASE(control[1:0]) OF
+ 0: tmp[127:0] := src[127:0]
+ 1: tmp[127:0] := src[255:128]
+ 2: tmp[127:0] := src[383:256]
+ 3: tmp[127:0] := src[511:384]
+ ESAC
+ RETURN tmp[127:0]
+}
+dst[127:0] := SELECT4(a[511:0], imm8[1:0])
+dst[255:128] := SELECT4(a[511:0], imm8[3:2])
+dst[383:256] := SELECT4(b[511:0], imm8[5:4])
+dst[511:384] := SELECT4(b[511:0], imm8[7:6])
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VSHUFF64X2" form="zmm, zmm, zmm, imm8" xed="VSHUFF64X2_ZMMf64_MASKmskw_ZMMf64_ZMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_shuffle_i32x4">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="src" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="__m512i" varname="b" etype="UI32"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shuffle 128-bits (composed of 4 32-bit integers) selected by "imm8" from "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE SELECT4(src, control) {
+ CASE(control[1:0]) OF
+ 0: tmp[127:0] := src[127:0]
+ 1: tmp[127:0] := src[255:128]
+ 2: tmp[127:0] := src[383:256]
+ 3: tmp[127:0] := src[511:384]
+ ESAC
+ RETURN tmp[127:0]
+}
+tmp_dst[127:0] := SELECT4(a[511:0], imm8[1:0])
+tmp_dst[255:128] := SELECT4(a[511:0], imm8[3:2])
+tmp_dst[383:256] := SELECT4(b[511:0], imm8[5:4])
+tmp_dst[511:384] := SELECT4(b[511:0], imm8[7:6])
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := tmp_dst[i+31:i]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VSHUFI32X4" form="zmm {k}, zmm, zmm, imm8" xed="VSHUFI32X4_ZMMu32_MASKmskw_ZMMu32_ZMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_shuffle_i32x4">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="__m512i" varname="b" etype="UI32"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shuffle 128-bits (composed of 4 32-bit integers) selected by "imm8" from "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE SELECT4(src, control) {
+ CASE(control[1:0]) OF
+ 0: tmp[127:0] := src[127:0]
+ 1: tmp[127:0] := src[255:128]
+ 2: tmp[127:0] := src[383:256]
+ 3: tmp[127:0] := src[511:384]
+ ESAC
+ RETURN tmp[127:0]
+}
+tmp_dst[127:0] := SELECT4(a[511:0], imm8[1:0])
+tmp_dst[255:128] := SELECT4(a[511:0], imm8[3:2])
+tmp_dst[383:256] := SELECT4(b[511:0], imm8[5:4])
+tmp_dst[511:384] := SELECT4(b[511:0], imm8[7:6])
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := tmp_dst[i+31:i]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VSHUFI32X4" form="zmm {z}, zmm, zmm, imm8" xed="VSHUFI32X4_ZMMu32_MASKmskw_ZMMu32_ZMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_shuffle_i32x4">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="__m512i" varname="b" etype="UI32"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shuffle 128-bits (composed of 4 32-bit integers) selected by "imm8" from "a" and "b", and store the results in "dst".</description>
+ <operation>
+DEFINE SELECT4(src, control) {
+ CASE(control[1:0]) OF
+ 0: tmp[127:0] := src[127:0]
+ 1: tmp[127:0] := src[255:128]
+ 2: tmp[127:0] := src[383:256]
+ 3: tmp[127:0] := src[511:384]
+ ESAC
+ RETURN tmp[127:0]
+}
+dst[127:0] := SELECT4(a[511:0], imm8[1:0])
+dst[255:128] := SELECT4(a[511:0], imm8[3:2])
+dst[383:256] := SELECT4(b[511:0], imm8[5:4])
+dst[511:384] := SELECT4(b[511:0], imm8[7:6])
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VSHUFI32X4" form="zmm, zmm, zmm, imm8" xed="VSHUFI32X4_ZMMu32_MASKmskw_ZMMu32_ZMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_shuffle_i64x2">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="__m512i" varname="b" etype="UI64"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shuffle 128-bits (composed of 2 64-bit integers) selected by "imm8" from "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE SELECT4(src, control) {
+ CASE(control[1:0]) OF
+ 0: tmp[127:0] := src[127:0]
+ 1: tmp[127:0] := src[255:128]
+ 2: tmp[127:0] := src[383:256]
+ 3: tmp[127:0] := src[511:384]
+ ESAC
+ RETURN tmp[127:0]
+}
+tmp_dst[127:0] := SELECT4(a[511:0], imm8[1:0])
+tmp_dst[255:128] := SELECT4(a[511:0], imm8[3:2])
+tmp_dst[383:256] := SELECT4(b[511:0], imm8[5:4])
+tmp_dst[511:384] := SELECT4(b[511:0], imm8[7:6])
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := tmp_dst[i+63:i]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VSHUFI64X2" form="zmm {k}, zmm, zmm, imm8" xed="VSHUFI64X2_ZMMu64_MASKmskw_ZMMu64_ZMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_shuffle_i64x2">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="__m512i" varname="b" etype="UI64"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shuffle 128-bits (composed of 2 64-bit integers) selected by "imm8" from "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE SELECT4(src, control) {
+ CASE(control[1:0]) OF
+ 0: tmp[127:0] := src[127:0]
+ 1: tmp[127:0] := src[255:128]
+ 2: tmp[127:0] := src[383:256]
+ 3: tmp[127:0] := src[511:384]
+ ESAC
+ RETURN tmp[127:0]
+}
+tmp_dst[127:0] := SELECT4(a[511:0], imm8[1:0])
+tmp_dst[255:128] := SELECT4(a[511:0], imm8[3:2])
+tmp_dst[383:256] := SELECT4(b[511:0], imm8[5:4])
+tmp_dst[511:384] := SELECT4(b[511:0], imm8[7:6])
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := tmp_dst[i+63:i]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VSHUFI64X2" form="zmm {z}, zmm, zmm, imm8" xed="VSHUFI64X2_ZMMu64_MASKmskw_ZMMu64_ZMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_shuffle_i64x2">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="__m512i" varname="b" etype="UI64"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shuffle 128-bits (composed of 2 64-bit integers) selected by "imm8" from "a" and "b", and store the results in "dst".</description>
+ <operation>
+DEFINE SELECT4(src, control) {
+ CASE(control[1:0]) OF
+ 0: tmp[127:0] := src[127:0]
+ 1: tmp[127:0] := src[255:128]
+ 2: tmp[127:0] := src[383:256]
+ 3: tmp[127:0] := src[511:384]
+ ESAC
+ RETURN tmp[127:0]
+}
+dst[127:0] := SELECT4(a[511:0], imm8[1:0])
+dst[255:128] := SELECT4(a[511:0], imm8[3:2])
+dst[383:256] := SELECT4(b[511:0], imm8[5:4])
+dst[511:384] := SELECT4(b[511:0], imm8[7:6])
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VSHUFI64X2" form="zmm, zmm, zmm, imm8" xed="VSHUFI64X2_ZMMu64_MASKmskw_ZMMu64_ZMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_shuffle_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shuffle double-precision (64-bit) floating-point elements within 128-bit lanes using the control in "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+tmp_dst[63:0] := (imm8[0] == 0) ? a[63:0] : a[127:64]
+tmp_dst[127:64] := (imm8[1] == 0) ? b[63:0] : b[127:64]
+tmp_dst[191:128] := (imm8[2] == 0) ? a[191:128] : a[255:192]
+tmp_dst[255:192] := (imm8[3] == 0) ? b[191:128] : b[255:192]
+tmp_dst[319:256] := (imm8[4] == 0) ? a[319:256] : a[383:320]
+tmp_dst[383:320] := (imm8[5] == 0) ? b[319:256] : b[383:320]
+tmp_dst[447:384] := (imm8[6] == 0) ? a[447:384] : a[511:448]
+tmp_dst[511:448] := (imm8[7] == 0) ? b[447:384] : b[511:448]
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := tmp_dst[i+63:i]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VSHUFPD" form="zmm {k}, zmm, zmm, imm8" xed="VSHUFPD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_shuffle_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shuffle double-precision (64-bit) floating-point elements within 128-bit lanes using the control in "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+tmp_dst[63:0] := (imm8[0] == 0) ? a[63:0] : a[127:64]
+tmp_dst[127:64] := (imm8[1] == 0) ? b[63:0] : b[127:64]
+tmp_dst[191:128] := (imm8[2] == 0) ? a[191:128] : a[255:192]
+tmp_dst[255:192] := (imm8[3] == 0) ? b[191:128] : b[255:192]
+tmp_dst[319:256] := (imm8[4] == 0) ? a[319:256] : a[383:320]
+tmp_dst[383:320] := (imm8[5] == 0) ? b[319:256] : b[383:320]
+tmp_dst[447:384] := (imm8[6] == 0) ? a[447:384] : a[511:448]
+tmp_dst[511:448] := (imm8[7] == 0) ? b[447:384] : b[511:448]
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := tmp_dst[i+63:i]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VSHUFPD" form="zmm {z}, zmm, zmm, imm8" xed="VSHUFPD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_shuffle_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shuffle double-precision (64-bit) floating-point elements within 128-bit lanes using the control in "imm8", and store the results in "dst".</description>
+ <operation>
+dst[63:0] := (imm8[0] == 0) ? a[63:0] : a[127:64]
+dst[127:64] := (imm8[1] == 0) ? b[63:0] : b[127:64]
+dst[191:128] := (imm8[2] == 0) ? a[191:128] : a[255:192]
+dst[255:192] := (imm8[3] == 0) ? b[191:128] : b[255:192]
+dst[319:256] := (imm8[4] == 0) ? a[319:256] : a[383:320]
+dst[383:320] := (imm8[5] == 0) ? b[319:256] : b[383:320]
+dst[447:384] := (imm8[6] == 0) ? a[447:384] : a[511:448]
+dst[511:448] := (imm8[7] == 0) ? b[447:384] : b[511:448]
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VSHUFPD" form="zmm, zmm, zmm, imm8" xed="VSHUFPD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_shuffle_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shuffle single-precision (32-bit) floating-point elements in "a" within 128-bit lanes using the control in "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE SELECT4(src, control) {
+ CASE(control[1:0]) OF
+ 0: tmp[31:0] := src[31:0]
+ 1: tmp[31:0] := src[63:32]
+ 2: tmp[31:0] := src[95:64]
+ 3: tmp[31:0] := src[127:96]
+ ESAC
+ RETURN tmp[31:0]
+}
+tmp_dst[31:0] := SELECT4(a[127:0], imm8[1:0])
+tmp_dst[63:32] := SELECT4(a[127:0], imm8[3:2])
+tmp_dst[95:64] := SELECT4(b[127:0], imm8[5:4])
+tmp_dst[127:96] := SELECT4(b[127:0], imm8[7:6])
+tmp_dst[159:128] := SELECT4(a[255:128], imm8[1:0])
+tmp_dst[191:160] := SELECT4(a[255:128], imm8[3:2])
+tmp_dst[223:192] := SELECT4(b[255:128], imm8[5:4])
+tmp_dst[255:224] := SELECT4(b[255:128], imm8[7:6])
+tmp_dst[287:256] := SELECT4(a[383:256], imm8[1:0])
+tmp_dst[319:288] := SELECT4(a[383:256], imm8[3:2])
+tmp_dst[351:320] := SELECT4(b[383:256], imm8[5:4])
+tmp_dst[383:352] := SELECT4(b[383:256], imm8[7:6])
+tmp_dst[415:384] := SELECT4(a[511:384], imm8[1:0])
+tmp_dst[447:416] := SELECT4(a[511:384], imm8[3:2])
+tmp_dst[479:448] := SELECT4(b[511:384], imm8[5:4])
+tmp_dst[511:480] := SELECT4(b[511:384], imm8[7:6])
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := tmp_dst[i+31:i]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VSHUFPS" form="zmm {k}, zmm, zmm, imm8" xed="VSHUFPS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_shuffle_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shuffle single-precision (32-bit) floating-point elements in "a" within 128-bit lanes using the control in "imm8", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE SELECT4(src, control) {
+ CASE(control[1:0]) OF
+ 0: tmp[31:0] := src[31:0]
+ 1: tmp[31:0] := src[63:32]
+ 2: tmp[31:0] := src[95:64]
+ 3: tmp[31:0] := src[127:96]
+ ESAC
+ RETURN tmp[31:0]
+}
+tmp_dst[31:0] := SELECT4(a[127:0], imm8[1:0])
+tmp_dst[63:32] := SELECT4(a[127:0], imm8[3:2])
+tmp_dst[95:64] := SELECT4(b[127:0], imm8[5:4])
+tmp_dst[127:96] := SELECT4(b[127:0], imm8[7:6])
+tmp_dst[159:128] := SELECT4(a[255:128], imm8[1:0])
+tmp_dst[191:160] := SELECT4(a[255:128], imm8[3:2])
+tmp_dst[223:192] := SELECT4(b[255:128], imm8[5:4])
+tmp_dst[255:224] := SELECT4(b[255:128], imm8[7:6])
+tmp_dst[287:256] := SELECT4(a[383:256], imm8[1:0])
+tmp_dst[319:288] := SELECT4(a[383:256], imm8[3:2])
+tmp_dst[351:320] := SELECT4(b[383:256], imm8[5:4])
+tmp_dst[383:352] := SELECT4(b[383:256], imm8[7:6])
+tmp_dst[415:384] := SELECT4(a[511:384], imm8[1:0])
+tmp_dst[447:416] := SELECT4(a[511:384], imm8[3:2])
+tmp_dst[479:448] := SELECT4(b[511:384], imm8[5:4])
+tmp_dst[511:480] := SELECT4(b[511:384], imm8[7:6])
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := tmp_dst[i+31:i]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VSHUFPS" form="zmm {z}, zmm, zmm, imm8" xed="VSHUFPS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_shuffle_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shuffle single-precision (32-bit) floating-point elements in "a" within 128-bit lanes using the control in "imm8", and store the results in "dst".</description>
+ <operation>
+DEFINE SELECT4(src, control) {
+ CASE(control[1:0]) OF
+ 0: tmp[31:0] := src[31:0]
+ 1: tmp[31:0] := src[63:32]
+ 2: tmp[31:0] := src[95:64]
+ 3: tmp[31:0] := src[127:96]
+ ESAC
+ RETURN tmp[31:0]
+}
+dst[31:0] := SELECT4(a[127:0], imm8[1:0])
+dst[63:32] := SELECT4(a[127:0], imm8[3:2])
+dst[95:64] := SELECT4(b[127:0], imm8[5:4])
+dst[127:96] := SELECT4(b[127:0], imm8[7:6])
+dst[159:128] := SELECT4(a[255:128], imm8[1:0])
+dst[191:160] := SELECT4(a[255:128], imm8[3:2])
+dst[223:192] := SELECT4(b[255:128], imm8[5:4])
+dst[255:224] := SELECT4(b[255:128], imm8[7:6])
+dst[287:256] := SELECT4(a[383:256], imm8[1:0])
+dst[319:288] := SELECT4(a[383:256], imm8[3:2])
+dst[351:320] := SELECT4(b[383:256], imm8[5:4])
+dst[383:352] := SELECT4(b[383:256], imm8[7:6])
+dst[415:384] := SELECT4(a[511:384], imm8[1:0])
+dst[447:416] := SELECT4(a[511:384], imm8[3:2])
+dst[479:448] := SELECT4(b[511:384], imm8[5:4])
+dst[511:480] := SELECT4(b[511:384], imm8[7:6])
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VSHUFPS" form="zmm, zmm, zmm, imm8" xed="VSHUFPS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_sqrt_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Compute the square root of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := SQRT(a[i+63:i])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VSQRTPD" form="zmm {k}, zmm" xed="VSQRTPD_ZMMf64_MASKmskw_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_sqrt_round_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Compute the square root of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+ [round_note]</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := SQRT(a[i+63:i])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VSQRTPD" form="zmm {k}, zmm {er}" xed="VSQRTPD_ZMMf64_MASKmskw_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_sqrt_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Compute the square root of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := SQRT(a[i+63:i])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VSQRTPD" form="zmm {z}, zmm" xed="VSQRTPD_ZMMf64_MASKmskw_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_sqrt_round_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Compute the square root of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+ [round_note].</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := SQRT(a[i+63:i])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VSQRTPD" form="zmm {z}, zmm {er}" xed="VSQRTPD_ZMMf64_MASKmskw_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_sqrt_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Compute the square root of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ dst[i+63:i] := SQRT(a[i+63:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VSQRTPD" form="zmm, zmm" xed="VSQRTPD_ZMMf64_MASKmskw_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_sqrt_round_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Compute the square root of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst".
+ [round_note].</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ dst[i+63:i] := SQRT(a[i+63:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VSQRTPD" form="zmm, zmm {er}" xed="VSQRTPD_ZMMf64_MASKmskw_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_sqrt_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Compute the square root of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := SQRT(a[i+31:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VSQRTPS" form="zmm {k}, zmm" xed="VSQRTPS_ZMMf32_MASKmskw_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_sqrt_round_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Compute the square root of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+ [round_note]</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := SQRT(a[i+31:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VSQRTPS" form="zmm {k}, zmm {er}" xed="VSQRTPS_ZMMf32_MASKmskw_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_sqrt_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Compute the square root of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := SQRT(a[i+31:i])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VSQRTPS" form="zmm {z}, zmm" xed="VSQRTPS_ZMMf32_MASKmskw_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_sqrt_round_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Compute the square root of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+ [round_note]</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := SQRT(a[i+31:i])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VSQRTPS" form="zmm {z}, zmm {er}" xed="VSQRTPS_ZMMf32_MASKmskw_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_sqrt_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Compute the square root of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := SQRT(a[i+31:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VSQRTPS" form="zmm, zmm" xed="VSQRTPS_ZMMf32_MASKmskw_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_sqrt_round_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Compute the square root of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst".
+ [round_note].</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := SQRT(a[i+31:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VSQRTPS" form="zmm, zmm {er}" xed="VSQRTPS_ZMMf32_MASKmskw_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_sqrt_round_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Compute the square root of the lower double-precision (64-bit) floating-point element in "b", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst".
+ [round_note]</description>
+ <operation>
+IF k[0]
+ dst[63:0] := SQRT(b[63:0])
+ELSE
+ dst[63:0] := src[63:0]
+FI
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VSQRTSD" form="xmm {k}, xmm, xmm {er}" xed="VSQRTSD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_sqrt_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Compute the square root of the lower double-precision (64-bit) floating-point element in "b", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst".</description>
+ <operation>
+IF k[0]
+ dst[63:0] := SQRT(b[63:0])
+ELSE
+ dst[63:0] := src[63:0]
+FI
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VSQRTSD" form="xmm {k}, xmm, xmm" xed="VSQRTSD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_sqrt_round_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Compute the square root of the lower double-precision (64-bit) floating-point element in "b", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst".
+ [round_note]</description>
+ <operation>
+IF k[0]
+ dst[63:0] := SQRT(b[63:0])
+ELSE
+ dst[63:0] := 0
+FI
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VSQRTSD" form="xmm {z}, xmm, xmm {er}" xed="VSQRTSD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_sqrt_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Compute the square root of the lower double-precision (64-bit) floating-point element in "b", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst".</description>
+ <operation>
+IF k[0]
+ dst[63:0] := SQRT(b[63:0])
+ELSE
+ dst[63:0] := 0
+FI
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VSQRTSD" form="xmm {z}, xmm, xmm" xed="VSQRTSD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_sqrt_round_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Compute the square root of the lower double-precision (64-bit) floating-point element in "b", store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst".
+ [round_note]</description>
+ <operation>
+dst[63:0] := SQRT(b[63:0])
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VSQRTSD" form="xmm, xmm, xmm {er}" xed="VSQRTSD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_sqrt_round_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Compute the square root of the lower single-precision (32-bit) floating-point element in "b", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst".
+ [round_note]</description>
+ <operation>
+IF k[0]
+ dst[31:0] := SQRT(b[31:0])
+ELSE
+ dst[31:0] := src[31:0]
+FI
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VSQRTSS" form="xmm {k}, xmm, xmm {er}" xed="VSQRTSS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_sqrt_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Compute the square root of the lower single-precision (32-bit) floating-point element in "b", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst".</description>
+ <operation>
+IF k[0]
+ dst[31:0] := SQRT(b[31:0])
+ELSE
+ dst[31:0] := src[31:0]
+FI
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VSQRTSS" form="xmm {k}, xmm, xmm" xed="VSQRTSS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_sqrt_round_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Compute the square root of the lower single-precision (32-bit) floating-point element in "b", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst".
+ [round_note]</description>
+ <operation>
+IF k[0]
+ dst[31:0] := SQRT(b[31:0])
+ELSE
+ dst[31:0] := 0
+FI
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VSQRTSS" form="xmm {z}, xmm, xmm {er}" xed="VSQRTSS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_sqrt_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Compute the square root of the lower single-precision (32-bit) floating-point element in "b", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst".</description>
+ <operation>
+IF k[0]
+ dst[31:0] := SQRT(b[31:0])
+ELSE
+ dst[31:0] := 0
+FI
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VSQRTSS" form="xmm {z}, xmm, xmm" xed="VSQRTSS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_sqrt_round_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Compute the square root of the lower single-precision (32-bit) floating-point element in "b", store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst".
+ [round_note]</description>
+ <operation>
+dst[31:0] := SQRT(b[31:0])
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VSQRTSS" form="xmm, xmm, xmm {er}" xed="VSQRTSS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_sub_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <description>Subtract packed double-precision (64-bit) floating-point elements in "b" from packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := a[i+63:i] - b[i+63:i]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VSUBPD" form="zmm {z}, zmm, zmm" xed="VSUBPD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_sub_round_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Subtract packed double-precision (64-bit) floating-point elements in "b" from packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+ [round_note]</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := a[i+63:i] - b[i+63:i]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VSUBPD" form="zmm {z}, zmm, zmm {er}" xed="VSUBPD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_sub_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <description>Subtract packed single-precision (32-bit) floating-point elements in "b" from packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := a[i+31:i] - b[i+31:i]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VSUBPS" form="zmm {z}, zmm, zmm" xed="VSUBPS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_sub_round_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Subtract packed single-precision (32-bit) floating-point elements in "b" from packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).
+ [round_note]</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := a[i+31:i] - b[i+31:i]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VSUBPS" form="zmm {z}, zmm, zmm {er}" xed="VSUBPS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_sub_round_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Subtract the lower double-precision (64-bit) floating-point element in "b" from the lower double-precision (64-bit) floating-point element in "a", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst".
+ [round_note]</description>
+ <operation>
+IF k[0]
+ dst[63:0] := a[63:0] - b[63:0]
+ELSE
+ dst[63:0] := src[63:0]
+FI
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VSUBSD" form="xmm {k}, xmm, xmm {er}" xed="VSUBSD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_sub_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Subtract the lower double-precision (64-bit) floating-point element in "b" from the lower double-precision (64-bit) floating-point element in "a", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst".</description>
+ <operation>
+IF k[0]
+ dst[63:0] := a[63:0] - b[63:0]
+ELSE
+ dst[63:0] := src[63:0]
+FI
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VSUBSD" form="xmm {k}, xmm, xmm" xed="VSUBSD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_sub_round_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Subtract the lower double-precision (64-bit) floating-point element in "b" from the lower double-precision (64-bit) floating-point element in "a", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst".
+ [round_note]</description>
+ <operation>
+IF k[0]
+ dst[63:0] := a[63:0] - b[63:0]
+ELSE
+ dst[63:0] := 0
+FI
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VSUBSD" form="xmm {z}, xmm, xmm {er}" xed="VSUBSD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_sub_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Subtract the lower double-precision (64-bit) floating-point element in "b" from the lower double-precision (64-bit) floating-point element in "a", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper element from "a" to the upper element of "dst".</description>
+ <operation>
+IF k[0]
+ dst[63:0] := a[63:0] - b[63:0]
+ELSE
+ dst[63:0] := 0
+FI
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VSUBSD" form="xmm {z}, xmm, xmm" xed="VSUBSD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_sub_round_sd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Subtract the lower double-precision (64-bit) floating-point element in "b" from the lower double-precision (64-bit) floating-point element in "a", store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst".
+ [round_note]</description>
+ <operation>
+dst[63:0] := a[63:0] - b[63:0]
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VSUBSD" form="xmm, xmm, xmm {er}" xed="VSUBSD_XMMf64_MASKmskw_XMMf64_XMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_sub_round_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Subtract the lower single-precision (32-bit) floating-point element in "b" from the lower single-precision (32-bit) floating-point element in "a", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst".
+ [round_note]</description>
+ <operation>
+IF k[0]
+ dst[31:0] := a[31:0] - b[31:0]
+ELSE
+ dst[31:0] := src[31:0]
+FI
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VSUBSS" form="xmm {k}, xmm, xmm {er}" xed="VSUBSS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_sub_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Subtract the lower single-precision (32-bit) floating-point element in "b" from the lower single-precision (32-bit) floating-point element in "a", store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst".</description>
+ <operation>
+IF k[0]
+ dst[31:0] := a[31:0] - b[31:0]
+ELSE
+ dst[31:0] := src[31:0]
+FI
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VSUBSS" form="xmm {k}, xmm, xmm" xed="VSUBSS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_sub_round_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Subtract the lower single-precision (32-bit) floating-point element in "b" from the lower single-precision (32-bit) floating-point element in "a", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst".
+ [round_note]</description>
+ <operation>
+IF k[0]
+ dst[31:0] := a[31:0] - b[31:0]
+ELSE
+ dst[31:0] := 0
+FI
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VSUBSS" form="xmm {z}, xmm, xmm {er}" xed="VSUBSS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_sub_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Subtract the lower single-precision (32-bit) floating-point element in "b" from the lower single-precision (32-bit) floating-point element in "a", store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from "a" to the upper elements of "dst".</description>
+ <operation>
+IF k[0]
+ dst[31:0] := a[31:0] - b[31:0]
+ELSE
+ dst[31:0] := 0
+FI
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VSUBSS" form="xmm {z}, xmm, xmm" xed="VSUBSS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_sub_round_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Subtract the lower single-precision (32-bit) floating-point element in "b" from the lower single-precision (32-bit) floating-point element in "a", store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst".
+ [round_note]</description>
+ <operation>
+dst[31:0] := a[31:0] - b[31:0]
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VSUBSS" form="xmm, xmm, xmm {er}" xed="VSUBSS_XMMf32_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_unpackhi_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <description>Unpack and interleave double-precision (64-bit) floating-point elements from the high half of each 128-bit lane in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE INTERLEAVE_HIGH_QWORDS(src1[127:0], src2[127:0]) {
+ dst[63:0] := src1[127:64]
+ dst[127:64] := src2[127:64]
+ RETURN dst[127:0]
+}
+tmp_dst[127:0] := INTERLEAVE_HIGH_QWORDS(a[127:0], b[127:0])
+tmp_dst[255:128] := INTERLEAVE_HIGH_QWORDS(a[255:128], b[255:128])
+tmp_dst[383:256] := INTERLEAVE_HIGH_QWORDS(a[383:256], b[383:256])
+tmp_dst[511:384] := INTERLEAVE_HIGH_QWORDS(a[511:384], b[511:384])
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := tmp_dst[i+63:i]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VUNPCKHPD" form="zmm {k}, zmm, zmm" xed="VUNPCKHPD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_unpackhi_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <description>Unpack and interleave double-precision (64-bit) floating-point elements from the high half of each 128-bit lane in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE INTERLEAVE_HIGH_QWORDS(src1[127:0], src2[127:0]) {
+ dst[63:0] := src1[127:64]
+ dst[127:64] := src2[127:64]
+ RETURN dst[127:0]
+}
+tmp_dst[127:0] := INTERLEAVE_HIGH_QWORDS(a[127:0], b[127:0])
+tmp_dst[255:128] := INTERLEAVE_HIGH_QWORDS(a[255:128], b[255:128])
+tmp_dst[383:256] := INTERLEAVE_HIGH_QWORDS(a[383:256], b[383:256])
+tmp_dst[511:384] := INTERLEAVE_HIGH_QWORDS(a[511:384], b[511:384])
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := tmp_dst[i+63:i]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VUNPCKHPD" form="zmm {z}, zmm, zmm" xed="VUNPCKHPD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_unpackhi_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <description>Unpack and interleave double-precision (64-bit) floating-point elements from the high half of each 128-bit lane in "a" and "b", and store the results in "dst".</description>
+ <operation>
+DEFINE INTERLEAVE_HIGH_QWORDS(src1[127:0], src2[127:0]) {
+ dst[63:0] := src1[127:64]
+ dst[127:64] := src2[127:64]
+ RETURN dst[127:0]
+}
+dst[127:0] := INTERLEAVE_HIGH_QWORDS(a[127:0], b[127:0])
+dst[255:128] := INTERLEAVE_HIGH_QWORDS(a[255:128], b[255:128])
+dst[383:256] := INTERLEAVE_HIGH_QWORDS(a[383:256], b[383:256])
+dst[511:384] := INTERLEAVE_HIGH_QWORDS(a[511:384], b[511:384])
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VUNPCKHPD" form="zmm, zmm, zmm" xed="VUNPCKHPD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_unpackhi_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <description>Unpack and interleave single-precision (32-bit) floating-point elements from the high half of each 128-bit lane in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE INTERLEAVE_HIGH_DWORDS(src1[127:0], src2[127:0]) {
+ dst[31:0] := src1[95:64]
+ dst[63:32] := src2[95:64]
+ dst[95:64] := src1[127:96]
+ dst[127:96] := src2[127:96]
+ RETURN dst[127:0]
+}
+tmp_dst[127:0] := INTERLEAVE_HIGH_DWORDS(a[127:0], b[127:0])
+tmp_dst[255:128] := INTERLEAVE_HIGH_DWORDS(a[255:128], b[255:128])
+tmp_dst[383:256] := INTERLEAVE_HIGH_DWORDS(a[383:256], b[383:256])
+tmp_dst[511:384] := INTERLEAVE_HIGH_DWORDS(a[511:384], b[511:384])
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := tmp_dst[i+31:i]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VUNPCKHPS" form="zmm {k}, zmm, zmm" xed="VUNPCKHPS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_unpackhi_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <description>Unpack and interleave single-precision (32-bit) floating-point elements from the high half of each 128-bit lane in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE INTERLEAVE_HIGH_DWORDS(src1[127:0], src2[127:0]) {
+ dst[31:0] := src1[95:64]
+ dst[63:32] := src2[95:64]
+ dst[95:64] := src1[127:96]
+ dst[127:96] := src2[127:96]
+ RETURN dst[127:0]
+}
+tmp_dst[127:0] := INTERLEAVE_HIGH_DWORDS(a[127:0], b[127:0])
+tmp_dst[255:128] := INTERLEAVE_HIGH_DWORDS(a[255:128], b[255:128])
+tmp_dst[383:256] := INTERLEAVE_HIGH_DWORDS(a[383:256], b[383:256])
+tmp_dst[511:384] := INTERLEAVE_HIGH_DWORDS(a[511:384], b[511:384])
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := tmp_dst[i+31:i]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VUNPCKHPS" form="zmm {z}, zmm, zmm" xed="VUNPCKHPS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_unpackhi_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <description>Unpack and interleave single-precision (32-bit) floating-point elements from the high half of each 128-bit lane in "a" and "b", and store the results in "dst".</description>
+ <operation>
+DEFINE INTERLEAVE_HIGH_DWORDS(src1[127:0], src2[127:0]) {
+ dst[31:0] := src1[95:64]
+ dst[63:32] := src2[95:64]
+ dst[95:64] := src1[127:96]
+ dst[127:96] := src2[127:96]
+ RETURN dst[127:0]
+}
+dst[127:0] := INTERLEAVE_HIGH_DWORDS(a[127:0], b[127:0])
+dst[255:128] := INTERLEAVE_HIGH_DWORDS(a[255:128], b[255:128])
+dst[383:256] := INTERLEAVE_HIGH_DWORDS(a[383:256], b[383:256])
+dst[511:384] := INTERLEAVE_HIGH_DWORDS(a[511:384], b[511:384])
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VUNPCKHPS" form="zmm, zmm, zmm" xed="VUNPCKHPS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_unpacklo_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <description>Unpack and interleave double-precision (64-bit) floating-point elements from the low half of each 128-bit lane in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE INTERLEAVE_QWORDS(src1[127:0], src2[127:0]) {
+ dst[63:0] := src1[63:0]
+ dst[127:64] := src2[63:0]
+ RETURN dst[127:0]
+}
+tmp_dst[127:0] := INTERLEAVE_QWORDS(a[127:0], b[127:0])
+tmp_dst[255:128] := INTERLEAVE_QWORDS(a[255:128], b[255:128])
+tmp_dst[383:256] := INTERLEAVE_QWORDS(a[383:256], b[383:256])
+tmp_dst[511:384] := INTERLEAVE_QWORDS(a[511:384], b[511:384])
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := tmp_dst[i+63:i]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VUNPCKLPD" form="zmm {k}, zmm, zmm" xed="VUNPCKLPD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_unpacklo_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <description>Unpack and interleave double-precision (64-bit) floating-point elements from the low half of each 128-bit lane in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE INTERLEAVE_QWORDS(src1[127:0], src2[127:0]) {
+ dst[63:0] := src1[63:0]
+ dst[127:64] := src2[63:0]
+ RETURN dst[127:0]
+}
+tmp_dst[127:0] := INTERLEAVE_QWORDS(a[127:0], b[127:0])
+tmp_dst[255:128] := INTERLEAVE_QWORDS(a[255:128], b[255:128])
+tmp_dst[383:256] := INTERLEAVE_QWORDS(a[383:256], b[383:256])
+tmp_dst[511:384] := INTERLEAVE_QWORDS(a[511:384], b[511:384])
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := tmp_dst[i+63:i]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VUNPCKLPD" form="zmm {z}, zmm, zmm" xed="VUNPCKLPD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_unpacklo_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <description>Unpack and interleave double-precision (64-bit) floating-point elements from the low half of each 128-bit lane in "a" and "b", and store the results in "dst".</description>
+ <operation>
+DEFINE INTERLEAVE_QWORDS(src1[127:0], src2[127:0]) {
+ dst[63:0] := src1[63:0]
+ dst[127:64] := src2[63:0]
+ RETURN dst[127:0]
+}
+dst[127:0] := INTERLEAVE_QWORDS(a[127:0], b[127:0])
+dst[255:128] := INTERLEAVE_QWORDS(a[255:128], b[255:128])
+dst[383:256] := INTERLEAVE_QWORDS(a[383:256], b[383:256])
+dst[511:384] := INTERLEAVE_QWORDS(a[511:384], b[511:384])
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VUNPCKLPD" form="zmm, zmm, zmm" xed="VUNPCKLPD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_unpacklo_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <description>Unpack and interleave single-precision (32-bit) floating-point elements from the low half of each 128-bit lane in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE INTERLEAVE_DWORDS(src1[127:0], src2[127:0]) {
+ dst[31:0] := src1[31:0]
+ dst[63:32] := src2[31:0]
+ dst[95:64] := src1[63:32]
+ dst[127:96] := src2[63:32]
+ RETURN dst[127:0]
+}
+tmp_dst[127:0] := INTERLEAVE_DWORDS(a[127:0], b[127:0])
+tmp_dst[255:128] := INTERLEAVE_DWORDS(a[255:128], b[255:128])
+tmp_dst[383:256] := INTERLEAVE_DWORDS(a[383:256], b[383:256])
+tmp_dst[511:384] := INTERLEAVE_DWORDS(a[511:384], b[511:384])
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := tmp_dst[i+31:i]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VUNPCKLPS" form="zmm {k}, zmm, zmm" xed="VUNPCKLPS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_unpacklo_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <description>Unpack and interleave single-precision (32-bit) floating-point elements from the low half of each 128-bit lane in "a" and "b", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE INTERLEAVE_DWORDS(src1[127:0], src2[127:0]) {
+ dst[31:0] := src1[31:0]
+ dst[63:32] := src2[31:0]
+ dst[95:64] := src1[63:32]
+ dst[127:96] := src2[63:32]
+ RETURN dst[127:0]
+}
+tmp_dst[127:0] := INTERLEAVE_DWORDS(a[127:0], b[127:0])
+tmp_dst[255:128] := INTERLEAVE_DWORDS(a[255:128], b[255:128])
+tmp_dst[383:256] := INTERLEAVE_DWORDS(a[383:256], b[383:256])
+tmp_dst[511:384] := INTERLEAVE_DWORDS(a[511:384], b[511:384])
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := tmp_dst[i+31:i]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VUNPCKLPS" form="zmm {z}, zmm, zmm" xed="VUNPCKLPS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_unpacklo_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <description>Unpack and interleave single-precision (32-bit) floating-point elements from the low half of each 128-bit lane in "a" and "b", and store the results in "dst".</description>
+ <operation>
+DEFINE INTERLEAVE_DWORDS(src1[127:0], src2[127:0]) {
+ dst[31:0] := src1[31:0]
+ dst[63:32] := src2[31:0]
+ dst[95:64] := src1[63:32]
+ dst[127:96] := src2[63:32]
+ RETURN dst[127:0]
+}
+dst[127:0] := INTERLEAVE_DWORDS(a[127:0], b[127:0])
+dst[255:128] := INTERLEAVE_DWORDS(a[255:128], b[255:128])
+dst[383:256] := INTERLEAVE_DWORDS(a[383:256], b[383:256])
+dst[511:384] := INTERLEAVE_DWORDS(a[511:384], b[511:384])
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VUNPCKLPS" form="zmm, zmm, zmm" xed="VUNPCKLPS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_castpd128_pd512">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Cast</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Cast vector of type __m128d to type __m512d; the upper 384 bits of the result are undefined.
+ This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency.</description>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_castpd256_pd512">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Cast</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <description>Cast vector of type __m256d to type __m512d; the upper 256 bits of the result are undefined.
+ This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency.</description>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_castpd512_pd128">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Cast</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Cast vector of type __m512d to type __m128d.
+ This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency.</description>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_castps512_ps128">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Cast</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Cast vector of type __m512 to type __m128.
+ This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency.</description>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_castpd512_pd256">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Cast</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Cast vector of type __m512d to type __m256d.
+ This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency.</description>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_castps128_ps512">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Cast</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Cast vector of type __m128 to type __m512; the upper 384 bits of the result are undefined.
+ This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency.</description>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_castps256_ps512">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Cast</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <description>Cast vector of type __m256 to type __m512; the upper 256 bits of the result are undefined.
+ This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency.</description>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_castps512_ps256">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Cast</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Cast vector of type __m512 to type __m256.
+ This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency.</description>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_castsi128_si512">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Cast</category>
+ <return type="__m512i" varname="dst" etype="M512"/>
+ <parameter type="__m128i" varname="a" etype="M512"/>
+ <description>Cast vector of type __m128i to type __m512i; the upper 384 bits of the result are undefined.
+ This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency.</description>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_castsi256_si512">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Cast</category>
+ <return type="__m512i" varname="dst" etype="M512"/>
+ <parameter type="__m256i" varname="a" etype="M512"/>
+ <description>Cast vector of type __m256i to type __m512i; the upper 256 bits of the result are undefined.
+ This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency.</description>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_castsi512_si128">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Cast</category>
+ <return type="__m128i" varname="dst" etype="M128"/>
+ <parameter type="__m512i" varname="a" etype="M128"/>
+ <description>Cast vector of type __m512i to type __m128i.
+ This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency.</description>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_castsi512_si256">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Cast</category>
+ <return type="__m256i" varname="dst" etype="M256"/>
+ <parameter type="__m512i" varname="a" etype="M256"/>
+ <description>Cast vector of type __m512i to type __m256i.
+ This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency.</description>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_zextpd128_pd512">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Cast</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Cast vector of type __m128d to type __m512d; the upper 384 bits of the result are zeroed. This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency.</description>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_zextps128_ps512">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Cast</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Cast vector of type __m128 to type __m512; the upper 384 bits of the result are zeroed. This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency.</description>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_zextsi128_si512">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Cast</category>
+ <return type="__m512i" varname="dst" etype="M512"/>
+ <parameter type="__m128i" varname="a" etype="M512"/>
+ <description>Cast vector of type __m128i to type __m512i; the upper 384 bits of the result are zeroed. This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency.</description>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_zextpd256_pd512">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Cast</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <description>Cast vector of type __m256d to type __m512d; the upper 256 bits of the result are zeroed. This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency.</description>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_zextps256_ps512">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Cast</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <description>Cast vector of type __m256 to type __m512; the upper 256 bits of the result are zeroed. This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency.</description>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_zextsi256_si512">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Cast</category>
+ <return type="__m512i" varname="dst" etype="M512"/>
+ <parameter type="__m256i" varname="a" etype="M512"/>
+ <description>Cast vector of type __m256i to type __m512i; the upper 256 bits of the result are zeroed. This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency.</description>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" sequence="TRUE" name="_mm512_set1_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Set</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="double" varname="a" etype="FP64"/>
+ <description>Broadcast double-precision (64-bit) floating-point value "a" to all elements of "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ dst[i+63:i] := a[63:0]
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" sequence="TRUE" name="_mm512_set1_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Set</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="float" varname="a" etype="FP32"/>
+ <description>Broadcast single-precision (32-bit) floating-point value "a" to all elements of "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := a[31:0]
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" sequence="TRUE" name="_mm512_set4_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Set</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="int" varname="d" etype="UI32"/>
+ <parameter type="int" varname="c" etype="UI32"/>
+ <parameter type="int" varname="b" etype="UI32"/>
+ <parameter type="int" varname="a" etype="UI32"/>
+ <description>Set packed 32-bit integers in "dst" with the repeated 4 element sequence.</description>
+ <operation>
+dst[31:0] := a
+dst[63:32] := b
+dst[95:64] := c
+dst[127:96] := d
+dst[159:128] := a
+dst[191:160] := b
+dst[223:192] := c
+dst[255:224] := d
+dst[287:256] := a
+dst[319:288] := b
+dst[351:320] := c
+dst[383:352] := d
+dst[415:384] := a
+dst[447:416] := b
+dst[479:448] := c
+dst[511:480] := d
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" sequence="TRUE" name="_mm512_set4_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Set</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__int64" varname="d" etype="UI64"/>
+ <parameter type="__int64" varname="c" etype="UI64"/>
+ <parameter type="__int64" varname="b" etype="UI64"/>
+ <parameter type="__int64" varname="a" etype="UI64"/>
+ <description>Set packed 64-bit integers in "dst" with the repeated 4 element sequence.</description>
+ <operation>
+dst[63:0] := a
+dst[127:64] := b
+dst[191:128] := c
+dst[255:192] := d
+dst[319:256] := a
+dst[383:320] := b
+dst[447:384] := c
+dst[511:448] := d
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" sequence="TRUE" name="_mm512_set4_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Set</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="double" varname="d" etype="FP64"/>
+ <parameter type="double" varname="c" etype="FP64"/>
+ <parameter type="double" varname="b" etype="FP64"/>
+ <parameter type="double" varname="a" etype="FP64"/>
+ <description>Set packed double-precision (64-bit) floating-point elements in "dst" with the repeated 4 element sequence.</description>
+ <operation>
+dst[63:0] := a
+dst[127:64] := b
+dst[191:128] := c
+dst[255:192] := d
+dst[319:256] := a
+dst[383:320] := b
+dst[447:384] := c
+dst[511:448] := d
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" sequence="TRUE" name="_mm512_set4_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Set</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="float" varname="d" etype="FP32"/>
+ <parameter type="float" varname="c" etype="FP32"/>
+ <parameter type="float" varname="b" etype="FP32"/>
+ <parameter type="float" varname="a" etype="FP32"/>
+ <description>Set packed single-precision (32-bit) floating-point elements in "dst" with the repeated 4 element sequence.</description>
+ <operation>
+dst[31:0] := a
+dst[63:32] := b
+dst[95:64] := c
+dst[127:96] := d
+dst[159:128] := a
+dst[191:160] := b
+dst[223:192] := c
+dst[255:224] := d
+dst[287:256] := a
+dst[319:288] := b
+dst[351:320] := c
+dst[383:352] := d
+dst[415:384] := a
+dst[447:416] := b
+dst[479:448] := c
+dst[511:480] := d
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" sequence="TRUE" name="_mm512_set_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Set</category>
+ <return type="__m512i" varname="dst" etype="UI8"/>
+ <parameter type="char" varname="e63" etype="UI8"/>
+ <parameter type="char" varname="e62" etype="UI8"/>
+ <parameter type="char" varname="e61" etype="UI8"/>
+ <parameter type="char" varname="e60" etype="UI8"/>
+ <parameter type="char" varname="e59" etype="UI8"/>
+ <parameter type="char" varname="e58" etype="UI8"/>
+ <parameter type="char" varname="e57" etype="UI8"/>
+ <parameter type="char" varname="e56" etype="UI8"/>
+ <parameter type="char" varname="e55" etype="UI8"/>
+ <parameter type="char" varname="e54" etype="UI8"/>
+ <parameter type="char" varname="e53" etype="UI8"/>
+ <parameter type="char" varname="e52" etype="UI8"/>
+ <parameter type="char" varname="e51" etype="UI8"/>
+ <parameter type="char" varname="e50" etype="UI8"/>
+ <parameter type="char" varname="e49" etype="UI8"/>
+ <parameter type="char" varname="e48" etype="UI8"/>
+ <parameter type="char" varname="e47" etype="UI8"/>
+ <parameter type="char" varname="e46" etype="UI8"/>
+ <parameter type="char" varname="e45" etype="UI8"/>
+ <parameter type="char" varname="e44" etype="UI8"/>
+ <parameter type="char" varname="e43" etype="UI8"/>
+ <parameter type="char" varname="e42" etype="UI8"/>
+ <parameter type="char" varname="e41" etype="UI8"/>
+ <parameter type="char" varname="e40" etype="UI8"/>
+ <parameter type="char" varname="e39" etype="UI8"/>
+ <parameter type="char" varname="e38" etype="UI8"/>
+ <parameter type="char" varname="e37" etype="UI8"/>
+ <parameter type="char" varname="e36" etype="UI8"/>
+ <parameter type="char" varname="e35" etype="UI8"/>
+ <parameter type="char" varname="e34" etype="UI8"/>
+ <parameter type="char" varname="e33" etype="UI8"/>
+ <parameter type="char" varname="e32" etype="UI8"/>
+ <parameter type="char" varname="e31" etype="UI8"/>
+ <parameter type="char" varname="e30" etype="UI8"/>
+ <parameter type="char" varname="e29" etype="UI8"/>
+ <parameter type="char" varname="e28" etype="UI8"/>
+ <parameter type="char" varname="e27" etype="UI8"/>
+ <parameter type="char" varname="e26" etype="UI8"/>
+ <parameter type="char" varname="e25" etype="UI8"/>
+ <parameter type="char" varname="e24" etype="UI8"/>
+ <parameter type="char" varname="e23" etype="UI8"/>
+ <parameter type="char" varname="e22" etype="UI8"/>
+ <parameter type="char" varname="e21" etype="UI8"/>
+ <parameter type="char" varname="e20" etype="UI8"/>
+ <parameter type="char" varname="e19" etype="UI8"/>
+ <parameter type="char" varname="e18" etype="UI8"/>
+ <parameter type="char" varname="e17" etype="UI8"/>
+ <parameter type="char" varname="e16" etype="UI8"/>
+ <parameter type="char" varname="e15" etype="UI8"/>
+ <parameter type="char" varname="e14" etype="UI8"/>
+ <parameter type="char" varname="e13" etype="UI8"/>
+ <parameter type="char" varname="e12" etype="UI8"/>
+ <parameter type="char" varname="e11" etype="UI8"/>
+ <parameter type="char" varname="e10" etype="UI8"/>
+ <parameter type="char" varname="e9" etype="UI8"/>
+ <parameter type="char" varname="e8" etype="UI8"/>
+ <parameter type="char" varname="e7" etype="UI8"/>
+ <parameter type="char" varname="e6" etype="UI8"/>
+ <parameter type="char" varname="e5" etype="UI8"/>
+ <parameter type="char" varname="e4" etype="UI8"/>
+ <parameter type="char" varname="e3" etype="UI8"/>
+ <parameter type="char" varname="e2" etype="UI8"/>
+ <parameter type="char" varname="e1" etype="UI8"/>
+ <parameter type="char" varname="e0" etype="UI8"/>
+ <description>Set packed 8-bit integers in "dst" with the supplied values.</description>
+ <operation>
+dst[7:0] := e0
+dst[15:8] := e1
+dst[23:16] := e2
+dst[31:24] := e3
+dst[39:32] := e4
+dst[47:40] := e5
+dst[55:48] := e6
+dst[63:56] := e7
+dst[71:64] := e8
+dst[79:72] := e9
+dst[87:80] := e10
+dst[95:88] := e11
+dst[103:96] := e12
+dst[111:104] := e13
+dst[119:112] := e14
+dst[127:120] := e15
+dst[135:128] := e16
+dst[143:136] := e17
+dst[151:144] := e18
+dst[159:152] := e19
+dst[167:160] := e20
+dst[175:168] := e21
+dst[183:176] := e22
+dst[191:184] := e23
+dst[199:192] := e24
+dst[207:200] := e25
+dst[215:208] := e26
+dst[223:216] := e27
+dst[231:224] := e28
+dst[239:232] := e29
+dst[247:240] := e30
+dst[255:248] := e31
+dst[263:256] := e32
+dst[271:264] := e33
+dst[279:272] := e34
+dst[287:280] := e35
+dst[295:288] := e36
+dst[303:296] := e37
+dst[311:304] := e38
+dst[319:312] := e39
+dst[327:320] := e40
+dst[335:328] := e41
+dst[343:336] := e42
+dst[351:344] := e43
+dst[359:352] := e44
+dst[367:360] := e45
+dst[375:368] := e46
+dst[383:376] := e47
+dst[391:384] := e48
+dst[399:392] := e49
+dst[407:400] := e50
+dst[415:408] := e51
+dst[423:416] := e52
+dst[431:424] := e53
+dst[439:432] := e54
+dst[447:440] := e55
+dst[455:448] := e56
+dst[463:456] := e57
+dst[471:464] := e58
+dst[479:472] := e59
+dst[487:480] := e60
+dst[495:488] := e61
+dst[503:496] := e62
+dst[511:504] := e63
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" sequence="TRUE" name="_mm512_set_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Set</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="short" varname="e31" etype="UI16"/>
+ <parameter type="short" varname="e30" etype="UI16"/>
+ <parameter type="short" varname="e29" etype="UI16"/>
+ <parameter type="short" varname="e28" etype="UI16"/>
+ <parameter type="short" varname="e27" etype="UI16"/>
+ <parameter type="short" varname="e26" etype="UI16"/>
+ <parameter type="short" varname="e25" etype="UI16"/>
+ <parameter type="short" varname="e24" etype="UI16"/>
+ <parameter type="short" varname="e23" etype="UI16"/>
+ <parameter type="short" varname="e22" etype="UI16"/>
+ <parameter type="short" varname="e21" etype="UI16"/>
+ <parameter type="short" varname="e20" etype="UI16"/>
+ <parameter type="short" varname="e19" etype="UI16"/>
+ <parameter type="short" varname="e18" etype="UI16"/>
+ <parameter type="short" varname="e17" etype="UI16"/>
+ <parameter type="short" varname="e16" etype="UI16"/>
+ <parameter type="short" varname="e15" etype="UI16"/>
+ <parameter type="short" varname="e14" etype="UI16"/>
+ <parameter type="short" varname="e13" etype="UI16"/>
+ <parameter type="short" varname="e12" etype="UI16"/>
+ <parameter type="short" varname="e11" etype="UI16"/>
+ <parameter type="short" varname="e10" etype="UI16"/>
+ <parameter type="short" varname="e9" etype="UI16"/>
+ <parameter type="short" varname="e8" etype="UI16"/>
+ <parameter type="short" varname="e7" etype="UI16"/>
+ <parameter type="short" varname="e6" etype="UI16"/>
+ <parameter type="short" varname="e5" etype="UI16"/>
+ <parameter type="short" varname="e4" etype="UI16"/>
+ <parameter type="short" varname="e3" etype="UI16"/>
+ <parameter type="short" varname="e2" etype="UI16"/>
+ <parameter type="short" varname="e1" etype="UI16"/>
+ <parameter type="short" varname="e0" etype="UI16"/>
+ <description>Set packed 16-bit integers in "dst" with the supplied values.</description>
+ <operation>
+dst[15:0] := e0
+dst[31:16] := e1
+dst[47:32] := e2
+dst[63:48] := e3
+dst[79:64] := e4
+dst[95:80] := e5
+dst[111:96] := e6
+dst[127:112] := e7
+dst[143:128] := e8
+dst[159:144] := e9
+dst[175:160] := e10
+dst[191:176] := e11
+dst[207:192] := e12
+dst[223:208] := e13
+dst[239:224] := e14
+dst[255:240] := e15
+dst[271:256] := e16
+dst[287:272] := e17
+dst[303:288] := e18
+dst[319:304] := e19
+dst[335:320] := e20
+dst[351:336] := e21
+dst[367:352] := e22
+dst[383:368] := e23
+dst[399:384] := e24
+dst[415:400] := e25
+dst[431:416] := e26
+dst[447:432] := e27
+dst[463:448] := e28
+dst[479:464] := e29
+dst[495:480] := e30
+dst[511:496] := e31
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" sequence="TRUE" name="_mm512_set_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Set</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="int" varname="e15" etype="UI32"/>
+ <parameter type="int" varname="e14" etype="UI32"/>
+ <parameter type="int" varname="e13" etype="UI32"/>
+ <parameter type="int" varname="e12" etype="UI32"/>
+ <parameter type="int" varname="e11" etype="UI32"/>
+ <parameter type="int" varname="e10" etype="UI32"/>
+ <parameter type="int" varname="e9" etype="UI32"/>
+ <parameter type="int" varname="e8" etype="UI32"/>
+ <parameter type="int" varname="e7" etype="UI32"/>
+ <parameter type="int" varname="e6" etype="UI32"/>
+ <parameter type="int" varname="e5" etype="UI32"/>
+ <parameter type="int" varname="e4" etype="UI32"/>
+ <parameter type="int" varname="e3" etype="UI32"/>
+ <parameter type="int" varname="e2" etype="UI32"/>
+ <parameter type="int" varname="e1" etype="UI32"/>
+ <parameter type="int" varname="e0" etype="UI32"/>
+ <description>Set packed 32-bit integers in "dst" with the supplied values.</description>
+ <operation>
+dst[31:0] := e0
+dst[63:32] := e1
+dst[95:64] := e2
+dst[127:96] := e3
+dst[159:128] := e4
+dst[191:160] := e5
+dst[223:192] := e6
+dst[255:224] := e7
+dst[287:256] := e8
+dst[319:288] := e9
+dst[351:320] := e10
+dst[383:352] := e11
+dst[415:384] := e12
+dst[447:416] := e13
+dst[479:448] := e14
+dst[511:480] := e15
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" sequence="TRUE" name="_mm512_set_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Set</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__int64" varname="e7" etype="UI64"/>
+ <parameter type="__int64" varname="e6" etype="UI64"/>
+ <parameter type="__int64" varname="e5" etype="UI64"/>
+ <parameter type="__int64" varname="e4" etype="UI64"/>
+ <parameter type="__int64" varname="e3" etype="UI64"/>
+ <parameter type="__int64" varname="e2" etype="UI64"/>
+ <parameter type="__int64" varname="e1" etype="UI64"/>
+ <parameter type="__int64" varname="e0" etype="UI64"/>
+ <description>Set packed 64-bit integers in "dst" with the supplied values.</description>
+ <operation>
+dst[63:0] := e0
+dst[127:64] := e1
+dst[191:128] := e2
+dst[255:192] := e3
+dst[319:256] := e4
+dst[383:320] := e5
+dst[447:384] := e6
+dst[511:448] := e7
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" sequence="TRUE" name="_mm512_set_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Set</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="double" varname="e7" etype="FP64"/>
+ <parameter type="double" varname="e6" etype="FP64"/>
+ <parameter type="double" varname="e5" etype="FP64"/>
+ <parameter type="double" varname="e4" etype="FP64"/>
+ <parameter type="double" varname="e3" etype="FP64"/>
+ <parameter type="double" varname="e2" etype="FP64"/>
+ <parameter type="double" varname="e1" etype="FP64"/>
+ <parameter type="double" varname="e0" etype="FP64"/>
+ <description>Set packed double-precision (64-bit) floating-point elements in "dst" with the supplied values.</description>
+ <operation>
+dst[63:0] := e0
+dst[127:64] := e1
+dst[191:128] := e2
+dst[255:192] := e3
+dst[319:256] := e4
+dst[383:320] := e5
+dst[447:384] := e6
+dst[511:448] := e7
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" sequence="TRUE" name="_mm512_set_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Set</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="float" varname="e15" etype="FP32"/>
+ <parameter type="float" varname="e14" etype="FP32"/>
+ <parameter type="float" varname="e13" etype="FP32"/>
+ <parameter type="float" varname="e12" etype="FP32"/>
+ <parameter type="float" varname="e11" etype="FP32"/>
+ <parameter type="float" varname="e10" etype="FP32"/>
+ <parameter type="float" varname="e9" etype="FP32"/>
+ <parameter type="float" varname="e8" etype="FP32"/>
+ <parameter type="float" varname="e7" etype="FP32"/>
+ <parameter type="float" varname="e6" etype="FP32"/>
+ <parameter type="float" varname="e5" etype="FP32"/>
+ <parameter type="float" varname="e4" etype="FP32"/>
+ <parameter type="float" varname="e3" etype="FP32"/>
+ <parameter type="float" varname="e2" etype="FP32"/>
+ <parameter type="float" varname="e1" etype="FP32"/>
+ <parameter type="float" varname="e0" etype="FP32"/>
+ <description>Set packed single-precision (32-bit) floating-point elements in "dst" with the supplied values.</description>
+ <operation>
+dst[31:0] := e0
+dst[63:32] := e1
+dst[95:64] := e2
+dst[127:96] := e3
+dst[159:128] := e4
+dst[191:160] := e5
+dst[223:192] := e6
+dst[255:224] := e7
+dst[287:256] := e8
+dst[319:288] := e9
+dst[351:320] := e10
+dst[383:352] := e11
+dst[415:384] := e12
+dst[447:416] := e13
+dst[479:448] := e14
+dst[511:480] := e15
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" sequence="TRUE" name="_mm512_setr4_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Set</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="int" varname="d" etype="UI32"/>
+ <parameter type="int" varname="c" etype="UI32"/>
+ <parameter type="int" varname="b" etype="UI32"/>
+ <parameter type="int" varname="a" etype="UI32"/>
+ <description>Set packed 32-bit integers in "dst" with the repeated 4 element sequence in reverse order.</description>
+ <operation>
+dst[31:0] := d
+dst[63:32] := c
+dst[95:64] := b
+dst[127:96] := a
+dst[159:128] := d
+dst[191:160] := c
+dst[223:192] := b
+dst[255:224] := a
+dst[287:256] := d
+dst[319:288] := c
+dst[351:320] := b
+dst[383:352] := a
+dst[415:384] := d
+dst[447:416] := c
+dst[479:448] := b
+dst[511:480] := a
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" sequence="TRUE" name="_mm512_setr4_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Set</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__int64" varname="d" etype="UI64"/>
+ <parameter type="__int64" varname="c" etype="UI64"/>
+ <parameter type="__int64" varname="b" etype="UI64"/>
+ <parameter type="__int64" varname="a" etype="UI64"/>
+ <description>Set packed 64-bit integers in "dst" with the repeated 4 element sequence in reverse order.</description>
+ <operation>
+dst[63:0] := d
+dst[127:64] := c
+dst[191:128] := b
+dst[255:192] := a
+dst[319:256] := d
+dst[383:320] := c
+dst[447:384] := b
+dst[511:448] := a
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" sequence="TRUE" name="_mm512_setr4_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Set</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="double" varname="d" etype="FP64"/>
+ <parameter type="double" varname="c" etype="FP64"/>
+ <parameter type="double" varname="b" etype="FP64"/>
+ <parameter type="double" varname="a" etype="FP64"/>
+ <description>Set packed double-precision (64-bit) floating-point elements in "dst" with the repeated 4 element sequence in reverse order.</description>
+ <operation>
+dst[63:0] := d
+dst[127:64] := c
+dst[191:128] := b
+dst[255:192] := a
+dst[319:256] := d
+dst[383:320] := c
+dst[447:384] := b
+dst[511:448] := a
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" sequence="TRUE" name="_mm512_setr4_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Set</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="float" varname="d" etype="FP32"/>
+ <parameter type="float" varname="c" etype="FP32"/>
+ <parameter type="float" varname="b" etype="FP32"/>
+ <parameter type="float" varname="a" etype="FP32"/>
+ <description>Set packed single-precision (32-bit) floating-point elements in "dst" with the repeated 4 element sequence in reverse order.</description>
+ <operation>
+dst[31:0] := d
+dst[63:32] := c
+dst[95:64] := b
+dst[127:96] := a
+dst[159:128] := d
+dst[191:160] := c
+dst[223:192] := b
+dst[255:224] := a
+dst[287:256] := d
+dst[319:288] := c
+dst[351:320] := b
+dst[383:352] := a
+dst[415:384] := d
+dst[447:416] := c
+dst[479:448] := b
+dst[511:480] := a
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" sequence="TRUE" name="_mm512_setr_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Set</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="int" varname="e15" etype="UI32"/>
+ <parameter type="int" varname="e14" etype="UI32"/>
+ <parameter type="int" varname="e13" etype="UI32"/>
+ <parameter type="int" varname="e12" etype="UI32"/>
+ <parameter type="int" varname="e11" etype="UI32"/>
+ <parameter type="int" varname="e10" etype="UI32"/>
+ <parameter type="int" varname="e9" etype="UI32"/>
+ <parameter type="int" varname="e8" etype="UI32"/>
+ <parameter type="int" varname="e7" etype="UI32"/>
+ <parameter type="int" varname="e6" etype="UI32"/>
+ <parameter type="int" varname="e5" etype="UI32"/>
+ <parameter type="int" varname="e4" etype="UI32"/>
+ <parameter type="int" varname="e3" etype="UI32"/>
+ <parameter type="int" varname="e2" etype="UI32"/>
+ <parameter type="int" varname="e1" etype="UI32"/>
+ <parameter type="int" varname="e0" etype="UI32"/>
+ <description>Set packed 32-bit integers in "dst" with the supplied values in reverse order.</description>
+ <operation>
+dst[31:0] := e15
+dst[63:32] := e14
+dst[95:64] := e13
+dst[127:96] := e12
+dst[159:128] := e11
+dst[191:160] := e10
+dst[223:192] := e9
+dst[255:224] := e8
+dst[287:256] := e7
+dst[319:288] := e6
+dst[351:320] := e5
+dst[383:352] := e4
+dst[415:384] := e3
+dst[447:416] := e2
+dst[479:448] := e1
+dst[511:480] := e0
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" sequence="TRUE" name="_mm512_setr_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Set</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__int64" varname="e7" etype="UI64"/>
+ <parameter type="__int64" varname="e6" etype="UI64"/>
+ <parameter type="__int64" varname="e5" etype="UI64"/>
+ <parameter type="__int64" varname="e4" etype="UI64"/>
+ <parameter type="__int64" varname="e3" etype="UI64"/>
+ <parameter type="__int64" varname="e2" etype="UI64"/>
+ <parameter type="__int64" varname="e1" etype="UI64"/>
+ <parameter type="__int64" varname="e0" etype="UI64"/>
+ <description>Set packed 64-bit integers in "dst" with the supplied values in reverse order.</description>
+ <operation>
+dst[63:0] := e7
+dst[127:64] := e6
+dst[191:128] := e5
+dst[255:192] := e4
+dst[319:256] := e3
+dst[383:320] := e2
+dst[447:384] := e1
+dst[511:448] := e0
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" sequence="TRUE" name="_mm512_setr_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Set</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="double" varname="e7" etype="FP64"/>
+ <parameter type="double" varname="e6" etype="FP64"/>
+ <parameter type="double" varname="e5" etype="FP64"/>
+ <parameter type="double" varname="e4" etype="FP64"/>
+ <parameter type="double" varname="e3" etype="FP64"/>
+ <parameter type="double" varname="e2" etype="FP64"/>
+ <parameter type="double" varname="e1" etype="FP64"/>
+ <parameter type="double" varname="e0" etype="FP64"/>
+ <description>Set packed double-precision (64-bit) floating-point elements in "dst" with the supplied values in reverse order.</description>
+ <operation>
+dst[63:0] := e7
+dst[127:64] := e6
+dst[191:128] := e5
+dst[255:192] := e4
+dst[319:256] := e3
+dst[383:320] := e2
+dst[447:384] := e1
+dst[511:448] := e0
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" sequence="TRUE" name="_mm512_setr_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Set</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="float" varname="e15" etype="FP32"/>
+ <parameter type="float" varname="e14" etype="FP32"/>
+ <parameter type="float" varname="e13" etype="FP32"/>
+ <parameter type="float" varname="e12" etype="FP32"/>
+ <parameter type="float" varname="e11" etype="FP32"/>
+ <parameter type="float" varname="e10" etype="FP32"/>
+ <parameter type="float" varname="e9" etype="FP32"/>
+ <parameter type="float" varname="e8" etype="FP32"/>
+ <parameter type="float" varname="e7" etype="FP32"/>
+ <parameter type="float" varname="e6" etype="FP32"/>
+ <parameter type="float" varname="e5" etype="FP32"/>
+ <parameter type="float" varname="e4" etype="FP32"/>
+ <parameter type="float" varname="e3" etype="FP32"/>
+ <parameter type="float" varname="e2" etype="FP32"/>
+ <parameter type="float" varname="e1" etype="FP32"/>
+ <parameter type="float" varname="e0" etype="FP32"/>
+ <description>Set packed single-precision (32-bit) floating-point elements in "dst" with the supplied values in reverse order.</description>
+ <operation>
+dst[31:0] := e15
+dst[63:32] := e14
+dst[95:64] := e13
+dst[127:96] := e12
+dst[159:128] := e11
+dst[191:160] := e10
+dst[223:192] := e9
+dst[255:224] := e8
+dst[287:256] := e7
+dst[319:288] := e6
+dst[351:320] := e5
+dst[383:352] := e4
+dst[415:384] := e3
+dst[447:416] := e2
+dst[479:448] := e1
+dst[511:480] := e0
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_setzero">
+ <CPUID>AVX512F</CPUID>
+ <category>Set</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="void"/>
+ <description>Return vector of type __m512 with all elements set to zero.</description>
+ <operation>
+dst[MAX:0] := 0
+ </operation>
+ <instruction name="VPXORQ" form="zmm, zmm, zmm" xed="VPXORQ_ZMMu64_MASKmskw_ZMMu64_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_setzero_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Set</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <description>Return vector of type __m512i with all elements set to zero.</description>
+ <operation>
+dst[MAX:0] := 0
+ </operation>
+ <instruction name="VPXORQ" form="zmm, zmm, zmm" xed="VPXORQ_ZMMu64_MASKmskw_ZMMu64_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_setzero_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Set</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <description>Return vector of type __m512d with all elements set to zero.</description>
+ <operation>
+dst[MAX:0] := 0
+ </operation>
+ <instruction name="VPXORQ" form="zmm, zmm, zmm" xed="VPXORQ_ZMMu64_MASKmskw_ZMMu64_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_setzero_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Set</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <description>Return vector of type __m512 with all elements set to zero.</description>
+ <operation>
+dst[MAX:0] := 0
+ </operation>
+ <instruction name="VPXORQ" form="zmm, zmm, zmm" xed="VPXORQ_ZMMu64_MASKmskw_ZMMu64_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_setzero_si512">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Set</category>
+ <return type="__m512i" varname="dst" etype="M512"/>
+ <description>Return vector of type __m512i with all elements set to zero.</description>
+ <operation>
+dst[MAX:0] := 0
+ </operation>
+ <instruction name="VPXORQ" form="zmm, zmm, zmm" xed="VPXORQ_ZMMu64_MASKmskw_ZMMu64_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_undefined">
+ <CPUID>AVX512F</CPUID>
+ <category>General Support</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="void"/>
+ <description>Return vector of type __m512 with undefined elements.</description>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_undefined_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>General Support</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <description>Return vector of type __m512i with undefined elements.</description>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_undefined_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>General Support</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <description>Return vector of type __m512d with undefined elements.</description>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_undefined_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>General Support</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <description>Return vector of type __m512 with undefined elements.</description>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_acos_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Compute the inverse cosine of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ dst[i+63:i] := ACOS(a[i+63:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_mask_acos_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Compute the inverse cosine of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := ACOS(a[i+63:i])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_acos_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Compute the inverse cosine of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := ACOS(a[i+31:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_mask_acos_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Compute the inverse cosine of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := ACOS(a[i+31:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_acosh_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Compute the inverse hyperbolic cosine of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ dst[i+63:i] := ACOSH(a[i+63:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_mask_acosh_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Compute the inverse hyperbolic cosine of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := ACOSH(a[i+63:i])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_acosh_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Compute the inverse hyperbolic cosine of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := ACOSH(a[i+31:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_mask_acosh_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Compute the inverse hyperbolic cosine of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := ACOSH(a[i+31:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_asin_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Compute the inverse sine of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ dst[i+63:i] := ASIN(a[i+63:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_mask_asin_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Compute the inverse sine of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := ASIN(a[i+63:i])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_asin_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Compute the inverse sine of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := ASIN(a[i+31:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_mask_asin_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Compute the inverse sine of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := ASIN(a[i+31:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_asinh_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Compute the inverse hyperbolic sine of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ dst[i+63:i] := ASINH(a[i+63:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_mask_asinh_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Compute the inverse hyperbolic sine of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := ASINH(a[i+63:i])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_asinh_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Compute the inverse hyperbolic sine of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := ASINH(a[i+31:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_mask_asinh_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Compute the inverse hyperbolic sine of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := ASINH(a[i+31:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_atan2_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <description>Compute the inverse tangent of packed double-precision (64-bit) floating-point elements in "a" divided by packed elements in "b", and store the results in "dst" expressed in radians.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ dst[i+63:i] := ATAN2(a[i+63:i], b[i+63:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_mask_atan2_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <description>Compute the inverse tangent of packed double-precision (64-bit) floating-point elements in "a" divided by packed elements in "b", and store the results in "dst" expressed in radians using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := ATAN2(a[i+63:i], b[i+63:i])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_atan2_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <description>Compute the inverse tangent of packed single-precision (32-bit) floating-point elements in "a" divided by packed elements in "b", and store the results in "dst" expressed in radians.</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := ATAN2(a[i+31:i], b[i+31:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_mask_atan2_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <description>Compute the inverse tangent of packed single-precision (32-bit) floating-point elements in "a" divided by packed elements in "b", and store the results in "dst" expressed in radians using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := ATAN2(a[i+31:i], b[i+31:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_atan_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Compute the inverse tangent of packed double-precision (64-bit) floating-point elements in "a" and store the results in "dst" expressed in radians.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ dst[i+63:i] := ATAN(a[i+63:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_mask_atan_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Compute the inverse tangent of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" expressed in radians using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := ATAN(a[i+63:i])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_atan_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Compute the inverse tangent of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" expressed in radians.</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := ATAN(a[i+31:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_mask_atan_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+  <description>Compute the inverse tangent of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" expressed in radians using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := ATAN(a[i+31:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_atanh_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Compute the inverse hyperbolic tangent of packed double-precision (64-bit) floating-point elements in "a" and store the results in "dst" expressed in radians.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ dst[i+63:i] := ATANH(a[i+63:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_mask_atanh_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Compute the inverse hyperbolic tangent of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" expressed in radians using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := ATANH(a[i+63:i])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_atanh_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+	<description>Compute the inverse hyperbolic tangent of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" expressed in radians.</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := ATANH(a[i+31:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_mask_atanh_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+	<description>Compute the inverse hyperbolic tangent of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" expressed in radians using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := ATANH(a[i+31:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_cbrt_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Compute the cube root of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst".</description>
+ <operation>FOR j := 0 to 7
+ i := j*64
+ dst[i+63:i] := CubeRoot(a[i+63:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_mask_cbrt_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Compute the cube root of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := CubeRoot(a[i+63:i])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_cbrt_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Compute the cube root of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst".</description>
+ <operation>FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := CubeRoot(a[i+31:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_mask_cbrt_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Compute the cube root of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := CubeRoot(a[i+31:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_cdfnorm_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Probability/Statistics</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Compute the cumulative distribution function of packed double-precision (64-bit) floating-point elements in "a" using the normal distribution, and store the results in "dst".</description>
+ <operation>FOR j := 0 to 7
+ i := j*64
+ dst[i+63:i] := CDFNormal(a[i+63:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_mask_cdfnorm_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Probability/Statistics</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Compute the cumulative distribution function of packed double-precision (64-bit) floating-point elements in "a" using the normal distribution, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := CDFNormal(a[i+63:i])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_cdfnorm_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Probability/Statistics</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Compute the cumulative distribution function of packed single-precision (32-bit) floating-point elements in "a" using the normal distribution, and store the results in "dst".</description>
+ <operation>FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := CDFNormal(a[i+31:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_mask_cdfnorm_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Probability/Statistics</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Compute the cumulative distribution function of packed single-precision (32-bit) floating-point elements in "a" using the normal distribution, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := CDFNormal(a[i+31:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_cdfnorminv_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Probability/Statistics</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Compute the inverse cumulative distribution function of packed double-precision (64-bit) floating-point elements in "a" using the normal distribution, and store the results in "dst".</description>
+ <operation>FOR j := 0 to 7
+ i := j*64
+ dst[i+63:i] := InverseCDFNormal(a[i+63:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_mask_cdfnorminv_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Probability/Statistics</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Compute the inverse cumulative distribution function of packed double-precision (64-bit) floating-point elements in "a" using the normal distribution, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := InverseCDFNormal(a[i+63:i])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_cdfnorminv_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Probability/Statistics</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Compute the inverse cumulative distribution function of packed single-precision (32-bit) floating-point elements in "a" using the normal distribution, and store the results in "dst".</description>
+ <operation>FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := InverseCDFNormal(a[i+31:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_mask_cdfnorminv_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Probability/Statistics</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Compute the inverse cumulative distribution function of packed single-precision (32-bit) floating-point elements in "a" using the normal distribution, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := InverseCDFNormal(a[i+31:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_ceil_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Round the packed double-precision (64-bit) floating-point elements in "a" up to an integer value, and store the results as packed double-precision floating-point elements in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ dst[i+63:i] := CEIL(a[i+63:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_mask_ceil_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Round the packed double-precision (64-bit) floating-point elements in "a" up to an integer value, and store the results as packed double-precision floating-point elements in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := CEIL(a[i+63:i])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_ceil_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Round the packed single-precision (32-bit) floating-point elements in "a" up to an integer value, and store the results as packed single-precision floating-point elements in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := CEIL(a[i+31:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_mask_ceil_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Round the packed single-precision (32-bit) floating-point elements in "a" up to an integer value, and store the results as packed single-precision floating-point elements in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := CEIL(a[i+31:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_cos_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Compute the cosine of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ dst[i+63:i] := COS(a[i+63:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_mask_cos_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Compute the cosine of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := COS(a[i+63:i])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_cos_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Compute the cosine of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := COS(a[i+31:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_mask_cos_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Compute the cosine of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := COS(a[i+31:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_cosd_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Compute the cosine of packed double-precision (64-bit) floating-point elements in "a" expressed in degrees, and store the results in "dst".</description>
+ <operation>FOR j := 0 to 7
+ i := j*64
+ dst[i+63:i] := COSD(a[i+63:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_mask_cosd_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Compute the cosine of packed double-precision (64-bit) floating-point elements in "a" expressed in degrees, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := COSD(a[i+63:i])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_cosd_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Compute the cosine of packed single-precision (32-bit) floating-point elements in "a" expressed in degrees, and store the results in "dst".</description>
+ <operation>FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := COSD(a[i+31:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_mask_cosd_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Compute the cosine of packed single-precision (32-bit) floating-point elements in "a" expressed in degrees, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := COSD(a[i+31:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_cosh_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Compute the hyperbolic cosine of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ dst[i+63:i] := COSH(a[i+63:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_mask_cosh_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Compute the hyperbolic cosine of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := COSH(a[i+63:i])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_cosh_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Compute the hyperbolic cosine of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := COSH(a[i+31:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_mask_cosh_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Compute the hyperbolic cosine of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := COSH(a[i+31:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_erf_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Probability/Statistics</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Compute the error function of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst".</description>
+ <operation>FOR j := 0 to 7
+ i := j*64
+ dst[i+63:i] := ERF(a[i+63:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_mask_erf_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Probability/Statistics</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Compute the error function of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := ERF(a[i+63:i])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_erfc_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Probability/Statistics</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Compute the complementary error function of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst".</description>
+ <operation>FOR j := 0 to 7
+ i := j*64
+ dst[i+63:i] := 1.0 - ERF(a[i+63:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_mask_erfc_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Probability/Statistics</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Compute the complementary error function of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := 1.0 - ERF(a[i+63:i])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_erf_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Probability/Statistics</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Compute the error function of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst".</description>
+ <operation>FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := ERF(a[i+31:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_mask_erf_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Probability/Statistics</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Compute the error function of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := ERF(a[i+31:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_erfc_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Probability/Statistics</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Compute the complementary error function of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst".</description>
+ <operation>FOR j := 0 to 15
+ i := j*32
+	dst[i+31:i] := 1.0 - ERF(a[i+31:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_mask_erfc_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Probability/Statistics</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Compute the complementary error function of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+		dst[i+31:i] := 1.0 - ERF(a[i+31:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_erfinv_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Probability/Statistics</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Compute the inverse error function of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst".</description>
+ <operation>FOR j := 0 to 7
+ i := j*64
+ dst[i+63:i] := 1.0 / ERF(a[i+63:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_mask_erfinv_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Probability/Statistics</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Compute the inverse error function of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := 1.0 / ERF(a[i+63:i])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_erfinv_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Probability/Statistics</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Compute the inverse error function of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst".</description>
+ <operation>FOR j := 0 to 15
+ i := j*32
+	dst[i+31:i] := 1.0 / ERF(a[i+31:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_mask_erfinv_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Probability/Statistics</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Compute the inverse error function of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := 1.0 / ERF(a[i+31:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_erfcinv_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Probability/Statistics</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Compute the inverse complementary error function of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst".</description>
+ <operation>FOR j := 0 to 7
+ i := j*64
+ dst[i+63:i] := 1.0 / (1.0 - ERF(a[i+63:i]))
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_mask_erfcinv_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Probability/Statistics</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Compute the inverse complementary error function of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := 1.0 / (1.0 - ERF(a[i+63:i]))
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_erfcinv_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Probability/Statistics</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Compute the inverse complementary error function of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst".</description>
+ <operation>FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := 1.0 / (1.0 - ERF(a[i+31:i]))
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_mask_erfcinv_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Probability/Statistics</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Compute the inverse complementary error function of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := 1.0 / (1.0 - ERF(a[i+31:i]))
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_exp10_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Compute the exponential value of 10 raised to the power of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ dst[i+63:i] := POW(10.0, a[i+63:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_mask_exp10_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Compute the exponential value of 10 raised to the power of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := POW(10.0, a[i+63:i])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_exp10_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Compute the exponential value of 10 raised to the power of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := POW(FP32(10.0), a[i+31:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_mask_exp10_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Compute the exponential value of 10 raised to the power of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := POW(FP32(10.0), a[i+31:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_exp2_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Compute the exponential value of 2 raised to the power of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ dst[i+63:i] := POW(2.0, a[i+63:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_mask_exp2_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Compute the exponential value of 2 raised to the power of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := POW(2.0, a[i+63:i])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_exp2_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Compute the exponential value of 2 raised to the power of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := POW(FP32(2.0), a[i+31:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_mask_exp2_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Compute the exponential value of 2 raised to the power of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := POW(FP32(2.0), a[i+31:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_exp_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Compute the exponential value of "e" raised to the power of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ dst[i+63:i] := POW(e, a[i+63:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_mask_exp_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Compute the exponential value of "e" raised to the power of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := POW(e, a[i+63:i])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_exp_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Compute the exponential value of "e" raised to the power of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := POW(FP32(e), a[i+31:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_mask_exp_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Compute the exponential value of "e" raised to the power of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := POW(FP32(e), a[i+31:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_expm1_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Compute the exponential value of "e" raised to the power of packed double-precision (64-bit) floating-point elements in "a", subtract one from each element, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ dst[i+63:i] := POW(e, a[i+63:i]) - 1.0
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_mask_expm1_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Compute the exponential value of "e" raised to the power of packed double-precision (64-bit) floating-point elements in "a", subtract one from each element, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := POW(e, a[i+63:i]) - 1.0
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_expm1_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Compute the exponential value of "e" raised to the power of packed single-precision (32-bit) floating-point elements in "a", subtract one from each element, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := POW(FP32(e), a[i+31:i]) - 1.0
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_mask_expm1_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Compute the exponential value of "e" raised to the power of packed single-precision (32-bit) floating-point elements in "a", subtract one from each element, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := POW(FP32(e), a[i+31:i]) - 1.0
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_floor_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Round the packed double-precision (64-bit) floating-point elements in "a" down to an integer value, and store the results as packed double-precision floating-point elements in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ dst[i+63:i] := FLOOR(a[i+63:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_mask_floor_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Round the packed double-precision (64-bit) floating-point elements in "a" down to an integer value, and store the results as packed double-precision floating-point elements in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := FLOOR(a[i+63:i])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_floor_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Round the packed single-precision (32-bit) floating-point elements in "a" down to an integer value, and store the results as packed single-precision floating-point elements in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := FLOOR(a[i+31:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_mask_floor_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Round the packed single-precision (32-bit) floating-point elements in "a" down to an integer value, and store the results as packed single-precision floating-point elements in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := FLOOR(a[i+31:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_hypot_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <description>Compute the length of the hypotenuse of a right triangle, with the lengths of the other two sides of the triangle stored as packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ dst[i+63:i] := SQRT(POW(a[i+63:i], 2.0) + POW(b[i+63:i], 2.0))
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_mask_hypot_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <description>Compute the length of the hypotenuse of a right triangle, with the lengths of the other two sides of the triangle stored as packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := SQRT(POW(a[i+63:i], 2.0) + POW(b[i+63:i], 2.0))
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_hypot_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <description>Compute the length of the hypotenuse of a right triangle, with the lengths of the other two sides of the triangle stored as packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := SQRT(POW(a[i+31:i], 2.0) + POW(b[i+31:i], 2.0))
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_mask_hypot_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <description>Compute the length of the hypotenuse of a right triangle, with the lengths of the other two sides of the triangle stored as packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := SQRT(POW(a[i+31:i], 2.0) + POW(b[i+31:i], 2.0))
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_div_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="a" etype="SI32"/>
+ <parameter type="__m512i" varname="b" etype="SI32"/>
+ <description>Divide packed signed 32-bit integers in "a" by packed elements in "b", and store the truncated results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := 32*j
+ IF b[i+31:i] == 0
+ #DE
+ FI
+ dst[i+31:i] := Truncate32(a[i+31:i] / b[i+31:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_mask_div_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="src" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI32"/>
+ <parameter type="__m512i" varname="b" etype="SI32"/>
+ <description>Divide packed signed 32-bit integers in "a" by packed elements in "b", and store the truncated results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := 32*j
+ IF k[j]
+ IF b[i+31:i] == 0
+ #DE
+ FI
+ dst[i+31:i] := Truncate32(a[i+31:i] / b[i+31:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_div_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI8"/>
+ <parameter type="__m512i" varname="a" etype="SI8"/>
+ <parameter type="__m512i" varname="b" etype="SI8"/>
+ <description>Divide packed signed 8-bit integers in "a" by packed elements in "b", and store the truncated results in "dst".</description>
+ <operation>
+FOR j := 0 to 63
+ i := 8*j
+ IF b[i+7:i] == 0
+ #DE
+ FI
+ dst[i+7:i] := Truncate8(a[i+7:i] / b[i+7:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_div_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__m512i" varname="a" etype="SI16"/>
+ <parameter type="__m512i" varname="b" etype="SI16"/>
+ <description>Divide packed signed 16-bit integers in "a" by packed elements in "b", and store the truncated results in "dst".</description>
+ <operation>
+FOR j := 0 to 31
+ i := 16*j
+ IF b[i+15:i] == 0
+ #DE
+ FI
+ dst[i+15:i] := Truncate16(a[i+15:i] / b[i+15:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_div_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="a" etype="SI64"/>
+ <parameter type="__m512i" varname="b" etype="SI64"/>
+ <description>Divide packed signed 64-bit integers in "a" by packed elements in "b", and store the truncated results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := 64*j
+ IF b[i+63:i] == 0
+ #DE
+ FI
+ dst[i+63:i] := Truncate64(a[i+63:i] / b[i+63:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_invsqrt_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Compute the inverse square root of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst".</description>
+ <operation>FOR j := 0 to 7
+ i := j*64
+ dst[i+63:i] := InvSQRT(a[i+63:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_mask_invsqrt_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Compute the inverse square root of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := InvSQRT(a[i+63:i])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_invsqrt_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Compute the inverse square root of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst".</description>
+ <operation>FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := InvSQRT(a[i+31:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_mask_invsqrt_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Compute the inverse square root of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := InvSQRT(a[i+31:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_rem_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="__m512i" varname="b" etype="UI32"/>
+ <description>Divide packed 32-bit integers in "a" by packed elements in "b", and store the remainders as packed 32-bit integers in "dst".</description>
+ <operation>FOR j := 0 to 15
+ i := 32*j
+ dst[i+31:i] := REMAINDER(a[i+31:i] / b[i+31:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_mask_rem_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="src" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="__m512i" varname="b" etype="UI32"/>
+ <description>Divide packed 32-bit integers in "a" by packed elements in "b", and store the remainders as packed 32-bit integers in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>FOR j := 0 to 15
+ i := 32*j
+ IF k[j]
+ dst[i+31:i] := REMAINDER(a[i+31:i] / b[i+31:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_rem_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI8"/>
+ <parameter type="__m512i" varname="a" etype="UI8"/>
+ <parameter type="__m512i" varname="b" etype="UI8"/>
+ <description>Divide packed 8-bit integers in "a" by packed elements in "b", and store the remainders as packed 8-bit integers in "dst".</description>
+ <operation>FOR j := 0 to 63
+ i := 8*j
+ dst[i+7:i] := REMAINDER(a[i+7:i] / b[i+7:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_rem_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__m512i" varname="a" etype="UI16"/>
+ <parameter type="__m512i" varname="b" etype="UI16"/>
+ <description>Divide packed 16-bit integers in "a" by packed elements in "b", and store the remainders as packed 16-bit integers in "dst".</description>
+ <operation>FOR j := 0 to 31
+ i := 16*j
+ dst[i+15:i] := REMAINDER(a[i+15:i] / b[i+15:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_rem_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="__m512i" varname="b" etype="UI64"/>
+	<description>Divide packed 64-bit integers in "a" by packed elements in "b", and store the remainders as packed 64-bit integers in "dst".</description>
+ <operation>FOR j := 0 to 7
+ i := 64*j
+ dst[i+63:i] := REMAINDER(a[i+63:i] / b[i+63:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_log10_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Compute the base-10 logarithm of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ dst[i+63:i] := LOG(a[i+63:i]) / LOG(10.0)
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_mask_log10_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Compute the base-10 logarithm of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := LOG(a[i+63:i]) / LOG(10.0)
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_log10_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Compute the base-10 logarithm of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := LOG(a[i+31:i]) / LOG(10.0)
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_mask_log10_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Compute the base-10 logarithm of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := LOG(a[i+31:i]) / LOG(10.0)
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_log1p_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Compute the natural logarithm of one plus packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ dst[i+63:i] := LOG(1.0 + a[i+63:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_mask_log1p_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Compute the natural logarithm of one plus packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := LOG(1.0 + a[i+63:i])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_log1p_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Compute the natural logarithm of one plus packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := LOG(1.0 + a[i+31:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_mask_log1p_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Compute the natural logarithm of one plus packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := LOG(1.0 + a[i+31:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_log2_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Compute the base-2 logarithm of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ dst[i+63:i] := LOG(a[i+63:i]) / LOG(2.0)
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_mask_log2_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Compute the base-2 logarithm of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := LOG(a[i+63:i]) / LOG(2.0)
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_log_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Compute the natural logarithm of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ dst[i+63:i] := LOG(a[i+63:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_mask_log_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Compute the natural logarithm of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := LOG(a[i+63:i])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_log_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Compute the natural logarithm of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := LOG(a[i+31:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_mask_log_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Compute the natural logarithm of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := LOG(a[i+31:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_logb_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Convert the exponent of each packed double-precision (64-bit) floating-point element in "a" to a double-precision floating-point number representing the integer exponent, and store the results in "dst". This intrinsic essentially calculates "floor(log2(x))" for each element.</description>
+ <operation>FOR j := 0 to 7
+ i := j*64
+ dst[i+63:i] := ConvertExpFP64(a[i+63:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_mask_logb_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Convert the exponent of each packed double-precision (64-bit) floating-point element in "a" to a double-precision floating-point number representing the integer exponent, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). This intrinsic essentially calculates "floor(log2(x))" for each element.</description>
+ <operation>FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := ConvertExpFP64(a[i+63:i])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_logb_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Convert the exponent of each packed single-precision (32-bit) floating-point element in "a" to a single-precision floating-point number representing the integer exponent, and store the results in "dst". This intrinsic essentially calculates "floor(log2(x))" for each element.</description>
+ <operation>FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := ConvertExpFP32(a[i+31:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_mask_logb_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Convert the exponent of each packed single-precision (32-bit) floating-point element in "a" to a single-precision floating-point number representing the integer exponent, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). This intrinsic essentially calculates "floor(log2(x))" for each element.</description>
+ <operation>FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := ConvertExpFP32(a[i+31:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_nearbyint_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Rounds each packed double-precision (64-bit) floating-point element in "a" to the nearest integer value and stores the results as packed double-precision floating-point elements in "dst".</description>
+ <operation>FOR j := 0 to 7
+ i := j*64
+ dst[i+63:i] := NearbyInt(a[i+63:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_mask_nearbyint_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Rounds each packed double-precision (64-bit) floating-point element in "a" to the nearest integer value and stores the results as packed double-precision floating-point elements in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := NearbyInt(a[i+63:i])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_nearbyint_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+	<description>Rounds each packed single-precision (32-bit) floating-point element in "a" to the nearest integer value and stores the results as packed single-precision floating-point elements in "dst".</description>
+ <operation>FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := NearbyInt(a[i+31:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_mask_nearbyint_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+	<description>Rounds each packed single-precision (32-bit) floating-point element in "a" to the nearest integer value and stores the results as packed single-precision floating-point elements in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := NearbyInt(a[i+31:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_pow_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <description>Compute the exponential value of packed double-precision (64-bit) floating-point elements in "a" raised by packed elements in "b", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ dst[i+63:i] := POW(a[i+63:i], b[i+63:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_mask_pow_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <description>Compute the exponential value of packed double-precision (64-bit) floating-point elements in "a" raised by packed elements in "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := POW(a[i+63:i], b[i+63:i])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_pow_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <description>Compute the exponential value of packed single-precision (32-bit) floating-point elements in "a" raised by packed elements in "b", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := POW(a[i+31:i], b[i+31:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_mask_pow_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <description>Compute the exponential value of packed single-precision (32-bit) floating-point elements in "a" raised by packed elements in "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := POW(a[i+31:i], b[i+31:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_recip_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Computes the reciprocal of packed double-precision (64-bit) floating-point elements in "a", storing the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ dst[i+63:i] := (1.0 / a[i+63:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_mask_recip_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Computes the reciprocal of packed double-precision (64-bit) floating-point elements in "a", storing the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := (1.0 / a[i+63:i])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_recip_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Computes the reciprocal of packed single-precision (32-bit) floating-point elements in "a", storing the results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := (1.0 / a[i+31:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_mask_recip_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Computes the reciprocal of packed single-precision (32-bit) floating-point elements in "a", storing the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := (1.0 / a[i+31:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_rint_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Rounds the packed double-precision (64-bit) floating-point elements in "a" to the nearest even integer value and stores the results in "dst".</description>
+ <operation>FOR j := 0 to 7
+ i := j*64
+ dst[i+63:i] := RoundToNearestEven(a[i+63:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_mask_rint_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Rounds the packed double-precision (64-bit) floating-point elements in "a" to the nearest even integer value and stores the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := RoundToNearestEven(a[i+63:i])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_rint_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Rounds the packed single-precision (32-bit) floating-point elements in "a" to the nearest even integer value and stores the results in "dst".</description>
+ <operation>FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := RoundToNearestEven(a[i+31:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_mask_rint_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Rounds the packed single-precision (32-bit) floating-point elements in "a" to the nearest even integer value and stores the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := RoundToNearestEven(a[i+31:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_svml_round_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Round the packed double-precision (64-bit) floating-point elements in "a" to the nearest integer value, and store the results as packed double-precision floating-point elements in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ dst[i+63:i] := ROUND(a[i+63:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_mask_svml_round_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+	<description>Round the packed double-precision (64-bit) floating-point elements in "a" to the nearest integer value, and store the results as packed double-precision floating-point elements in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := ROUND(a[i+63:i])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_sin_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Compute the sine of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ dst[i+63:i] := SIN(a[i+63:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_mask_sin_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Compute the sine of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := SIN(a[i+63:i])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_sin_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Compute the sine of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := SIN(a[i+31:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_mask_sin_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Compute the sine of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := SIN(a[i+31:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_sinh_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Compute the hyperbolic sine of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ dst[i+63:i] := SINH(a[i+63:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_mask_sinh_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Compute the hyperbolic sine of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := SINH(a[i+63:i])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_sinh_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Compute the hyperbolic sine of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := SINH(a[i+31:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_mask_sinh_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Compute the hyperbolic sine of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := SINH(a[i+31:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_sind_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Compute the sine of packed double-precision (64-bit) floating-point elements in "a" expressed in degrees, and store the results in "dst".</description>
+ <operation>FOR j := 0 to 7
+ i := j*64
+ dst[i+63:i] := SIND(a[i+63:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_mask_sind_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Compute the sine of packed double-precision (64-bit) floating-point elements in "a" expressed in degrees, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := SIND(a[i+63:i])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_sind_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Compute the sine of packed single-precision (32-bit) floating-point elements in "a" expressed in degrees, and store the results in "dst".</description>
+ <operation>FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := SIND(a[i+31:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_mask_sind_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Compute the sine of packed single-precision (32-bit) floating-point elements in "a" expressed in degrees, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := SIND(a[i+31:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_tan_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Compute the tangent of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ dst[i+63:i] := TAN(a[i+63:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_mask_tan_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Compute the tangent of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := TAN(a[i+63:i])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_tan_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Compute the tangent of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := TAN(a[i+31:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_mask_tan_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Compute the tangent of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := TAN(a[i+31:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_tand_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Compute the tangent of packed double-precision (64-bit) floating-point elements in "a" expressed in degrees, and store the results in "dst".</description>
+ <operation>FOR j := 0 to 7
+ i := j*64
+ dst[i+63:i] := TAND(a[i+63:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_mask_tand_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Compute the tangent of packed double-precision (64-bit) floating-point elements in "a" expressed in degrees, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := TAND(a[i+63:i])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_tand_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Compute the tangent of packed single-precision (32-bit) floating-point elements in "a" expressed in degrees, and store the results in "dst".</description>
+ <operation>FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := TAND(a[i+31:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_mask_tand_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Compute the tangent of packed single-precision (32-bit) floating-point elements in "a" expressed in degrees, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := TAND(a[i+31:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_tanh_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Compute the hyperbolic tangent of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ dst[i+63:i] := TANH(a[i+63:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_mask_tanh_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Compute the hyperbolic tangent of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := TANH(a[i+63:i])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_tanh_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Compute the hyperbolic tangent of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := TANH(a[i+31:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_mask_tanh_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Compute the hyperbolic tangent of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := TANH(a[i+31:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_trunc_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Truncate the packed double-precision (64-bit) floating-point elements in "a", and store the results as packed double-precision floating-point elements in "dst".</description>
+ <operation>FOR j := 0 to 7
+ i := j*64
+ dst[i+63:i] := TRUNCATE(a[i+63:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_mask_trunc_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Truncate the packed double-precision (64-bit) floating-point elements in "a", and store the results as packed double-precision floating-point elements in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := TRUNCATE(a[i+63:i])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_trunc_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Truncate the packed single-precision (32-bit) floating-point elements in "a", and store the results as packed single-precision floating-point elements in "dst".</description>
+ <operation>FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := TRUNCATE(a[i+31:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_mask_trunc_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Truncate the packed single-precision (32-bit) floating-point elements in "a", and store the results as packed single-precision floating-point elements in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := TRUNCATE(a[i+31:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_div_epu32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="__m512i" varname="b" etype="UI32"/>
+ <description>Divide packed unsigned 32-bit integers in "a" by packed elements in "b", and store the truncated results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := 32*j
+ IF b[i+31:i] == 0
+ #DE
+ FI
+ dst[i+31:i] := Truncate32(a[i+31:i] / b[i+31:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_mask_div_epu32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="src" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="__m512i" varname="b" etype="UI32"/>
+ <description>Divide packed unsigned 32-bit integers in "a" by packed elements in "b", and store the truncated results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := 32*j
+ IF k[j]
+ IF b[i+31:i] == 0
+ #DE
+ FI
+ dst[i+31:i] := Truncate32(a[i+31:i] / b[i+31:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_div_epu8">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI8"/>
+ <parameter type="__m512i" varname="a" etype="UI8"/>
+ <parameter type="__m512i" varname="b" etype="UI8"/>
+ <description>Divide packed unsigned 8-bit integers in "a" by packed elements in "b", and store the truncated results in "dst".</description>
+ <operation>
+FOR j := 0 to 63
+ i := 8*j
+ IF b[i+7:i] == 0
+ #DE
+ FI
+ dst[i+7:i] := Truncate8(a[i+7:i] / b[i+7:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_div_epu16">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__m512i" varname="a" etype="UI16"/>
+ <parameter type="__m512i" varname="b" etype="UI16"/>
+ <description>Divide packed unsigned 16-bit integers in "a" by packed elements in "b", and store the truncated results in "dst".</description>
+ <operation>
+FOR j := 0 to 31
+ i := 16*j
+ IF b[i+15:i] == 0
+ #DE
+ FI
+ dst[i+15:i] := Truncate16(a[i+15:i] / b[i+15:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_div_epu64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="__m512i" varname="b" etype="UI64"/>
+ <description>Divide packed unsigned 64-bit integers in "a" by packed elements in "b", and store the truncated results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := 64*j
+ IF b[i+63:i] == 0
+ #DE
+ FI
+ dst[i+63:i] := Truncate64(a[i+63:i] / b[i+63:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_rem_epu32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="__m512i" varname="b" etype="UI32"/>
+ <description>Divide packed unsigned 32-bit integers in "a" by packed elements in "b", and store the remainders as packed unsigned 32-bit integers in "dst".</description>
+ <operation>FOR j := 0 to 15
+ i := 32*j
+ dst[i+31:i] := REMAINDER(a[i+31:i] / b[i+31:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_mask_rem_epu32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="src" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="__m512i" varname="b" etype="UI32"/>
+ <description>Divide packed unsigned 32-bit integers in "a" by packed elements in "b", and store the remainders as packed unsigned 32-bit integers in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>FOR j := 0 to 15
+ i := 32*j
+ IF k[j]
+ dst[i+31:i] := REMAINDER(a[i+31:i] / b[i+31:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_rem_epu8">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI8"/>
+ <parameter type="__m512i" varname="a" etype="UI8"/>
+ <parameter type="__m512i" varname="b" etype="UI8"/>
 <description>Divide packed unsigned 8-bit integers in "a" by packed elements in "b", and store the remainders as packed unsigned 8-bit integers in "dst".</description>
+ <operation>FOR j := 0 to 63
+ i := 8*j
+ dst[i+7:i] := REMAINDER(a[i+7:i] / b[i+7:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_rem_epu16">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__m512i" varname="a" etype="UI16"/>
+ <parameter type="__m512i" varname="b" etype="UI16"/>
 <description>Divide packed unsigned 16-bit integers in "a" by packed elements in "b", and store the remainders as packed unsigned 16-bit integers in "dst".</description>
+ <operation>FOR j := 0 to 31
+ i := 16*j
+ dst[i+15:i] := REMAINDER(a[i+15:i] / b[i+15:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_rem_epu64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="__m512i" varname="b" etype="UI64"/>
 <description>Divide packed unsigned 64-bit integers in "a" by packed elements in "b", and store the remainders as packed unsigned 64-bit integers in "dst".</description>
+ <operation>FOR j := 0 to 7
+ i := 64*j
+ dst[i+63:i] := REMAINDER(a[i+63:i] / b[i+63:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_kortestz">
+ <type>Mask</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Mask</category>
+ <return type="int" varname="dst" etype="UI32"/>
+ <parameter type="__mmask16" varname="k1" etype="MASK"/>
+ <parameter type="__mmask16" varname="k2" etype="MASK"/>
+ <description>Performs bitwise OR between "k1" and "k2", storing the result in "dst". ZF flag is set if "dst" is 0.</description>
+ <operation>dst[15:0] := k1[15:0] | k2[15:0]
+IF dst == 0
+ SetZF()
+FI
+ </operation>
+ <instruction name="KORTESTW" form="k, k" xed="KORTESTW_MASKmskw_MASKmskw_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_kortestc">
+ <type>Mask</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Mask</category>
+ <return type="int" varname="dst" etype="UI32"/>
+ <parameter type="__mmask16" varname="k1" etype="MASK"/>
+ <parameter type="__mmask16" varname="k2" etype="MASK"/>
+ <description>Performs bitwise OR between "k1" and "k2", storing the result in "dst". CF flag is set if "dst" consists of all 1's.</description>
+ <operation>dst[15:0] := k1[15:0] | k2[15:0]
+IF PopCount(dst[15:0]) == 16
+ SetCF()
+FI
+ </operation>
+ <instruction name="KORTESTW" form="k, k" xed="KORTESTW_MASKmskw_MASKmskw_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask2int">
+ <CPUID>AVX512F</CPUID>
+ <category>Mask</category>
+ <return type="int" varname="dst" etype="UI32"/>
+ <parameter type="__mmask16" varname="k1" etype="MASK"/>
+ <description>Converts bit mask "k1" into an integer value, storing the results in "dst".</description>
+ <operation>
+dst := ZeroExtend32(k1)
+ </operation>
+ <instruction name="KMOVW" form="r32, k" xed="KMOVW_GPR32u32_MASKmskw_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_int2mask">
+ <CPUID>AVX512F</CPUID>
+ <category>Mask</category>
+ <return type="__mmask16" varname="dst" etype="MASK"/>
+ <parameter type="int" varname="mask" etype="UI16"/>
+ <description>Converts integer "mask" into bitmask, storing the result in "dst".</description>
+ <operation>
+dst := mask[15:0]
+ </operation>
+ <instruction name="KMOVW" form="k, r32" xed="KMOVW_MASKmskw_GPR32u32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" sequence="TRUE" name="_mm512_mullox_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Store</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="__m512i" varname="b" etype="UI64"/>
+ <description>Multiplies elements in packed 64-bit integer vectors "a" and "b" together, storing the lower 64 bits of the result in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ dst[i+63:i] := a[i+63:i] * b[i+63:i]
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" sequence="TRUE" name="_mm512_mask_mullox_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Store</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="__m512i" varname="b" etype="UI64"/>
+ <description>Multiplies elements in packed 64-bit integer vectors "a" and "b" together, storing the lower 64 bits of the result in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := a[i+63:i] * b[i+63:i]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_sincos_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d *" varname="mem_addr" etype="FP64" memwidth="512"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Compute the sine and cosine of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, store the sine in "dst", and store the cosine into memory at "mem_addr".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ dst[i+63:i] := SIN(a[i+63:i])
+ MEM[mem_addr+i+63:mem_addr+i] := COS(a[i+63:i])
+ENDFOR
+dst[MAX:512] := 0
+cos_res[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_mask_sincos_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d *" varname="mem_addr" etype="FP64" memwidth="512"/>
+ <parameter type="__m512d" varname="sin_src" etype="FP64"/>
+ <parameter type="__m512d" varname="cos_src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Compute the sine and cosine of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, store the sine in "dst", store the cosine into memory at "mem_addr". Elements are written to their respective locations using writemask "k" (elements are copied from "sin_src" or "cos_src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := SIN(a[i+63:i])
+ MEM[mem_addr+i+63:mem_addr+i] := COS(a[i+63:i])
+ ELSE
+ dst[i+63:i] := sin_src[i+63:i]
+ MEM[mem_addr+i+63:mem_addr+i] := cos_src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+cos_res[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_sincos_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512 *" varname="mem_addr" etype="FP32" memwidth="512"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Compute the sine and cosine of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, store the sine in "dst", and store the cosine into memory at "mem_addr".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := SIN(a[i+31:i])
+ MEM[mem_addr+i+31:mem_addr+i] := COS(a[i+31:i])
+ENDFOR
+dst[MAX:512] := 0
+cos_res[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm512_mask_sincos_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512 *" varname="mem_addr" etype="FP32" memwidth="512"/>
+ <parameter type="__m512" varname="sin_src" etype="FP32"/>
+ <parameter type="__m512" varname="cos_src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Compute the sine and cosine of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, store the sine in "dst", store the cosine into memory at "mem_addr". Elements are written to their respective locations using writemask "k" (elements are copied from "sin_src" or "cos_src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := SIN(a[i+31:i])
+ MEM[mem_addr+i+31:mem_addr+i] := COS(a[i+31:i])
+ ELSE
+ dst[i+31:i] := sin_src[i+31:i]
+ MEM[mem_addr+i+31:mem_addr+i] := cos_src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+cos_res[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" vexEq="TRUE" name="_mm512_cvtss_f32">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="float" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Copy the lower single-precision (32-bit) floating-point element of "a" to "dst".</description>
+ <operation>
+dst[31:0] := a[31:0]
+ </operation>
+ <instruction name="VMOVSS" form="m32, xmm" xed="VMOVSS_MEMf32_MASKmskw_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" vexEq="TRUE" name="_mm512_cvtsd_f64">
+ <type>Floating Point</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="double" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Copy the lower double-precision (64-bit) floating-point element of "a" to "dst".</description>
+ <operation>
+dst[63:0] := a[63:0]
+ </operation>
+ <instruction name="VMOVSD" form="m64, xmm" xed="VMOVSD_MEMq_XMMq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" vexEq="TRUE" name="_mm512_cvtsi512_si32">
+ <type>Integer</type>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="int" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <description>Copy the lower 32-bit integer in "a" to "dst".</description>
+ <operation>
+dst[31:0] := a[31:0]
+ </operation>
+ <instruction name="VMOVD" form="r32, xmm" xed="VMOVD_GPR32u32_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_add_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <description>Add packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ dst[i+63:i] := a[i+63:i] + b[i+63:i]
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VADDPD" form="zmm, zmm, zmm" xed="VADDPD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_add_round_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Add packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst".
+ [round_note]</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ dst[i+63:i] := a[i+63:i] + b[i+63:i]
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VADDPD" form="zmm, zmm, zmm {er}" xed="VADDPD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_add_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <description>Add packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := a[i+63:i] + b[i+63:i]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VADDPD" form="zmm {k}, zmm, zmm" xed="VADDPD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_add_round_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Add packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+ [round_note]</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := a[i+63:i] + b[i+63:i]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VADDPD" form="zmm {k}, zmm, zmm {er}" xed="VADDPD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_add_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <description>Add packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := a[i+31:i] + b[i+31:i]
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VADDPS" form="zmm, zmm, zmm" xed="VADDPS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_add_round_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Add packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst".
+ [round_note]</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := a[i+31:i] + b[i+31:i]
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VADDPS" form="zmm, zmm, zmm {er}" xed="VADDPS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_add_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <description>Add packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := a[i+31:i] + b[i+31:i]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VADDPS" form="zmm {k}, zmm, zmm" xed="VADDPS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_add_round_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Add packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+ [round_note]</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := a[i+31:i] + b[i+31:i]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VADDPS" form="zmm {k}, zmm, zmm {er}" xed="VADDPS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_alignr_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="__m512i" varname="b" etype="UI32"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="4"/>
+ <description>Concatenate "a" and "b" into a 128-byte immediate result, shift the result right by "imm8" 32-bit elements, and store the low 64 bytes (16 elements) in "dst".</description>
+ <operation>
+temp[1023:512] := a[511:0]
+temp[511:0] := b[511:0]
+temp[1023:0] := temp[1023:0] &gt;&gt; (32*imm8[3:0])
+dst[511:0] := temp[511:0]
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VALIGND" form="zmm, zmm, zmm, imm8" xed="VALIGND_ZMMu32_MASKmskw_ZMMu32_ZMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_alignr_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="src" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="__m512i" varname="b" etype="UI32"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="4"/>
+ <description>Concatenate "a" and "b" into a 128-byte immediate result, shift the result right by "imm8" 32-bit elements, and store the low 64 bytes (16 elements) in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+temp[1023:512] := a[511:0]
+temp[511:0] := b[511:0]
+temp[1023:0] := temp[1023:0] &gt;&gt; (32*imm8[3:0])
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := temp[i+31:i]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VALIGND" form="zmm {k}, zmm, zmm, imm8" xed="VALIGND_ZMMu32_MASKmskw_ZMMu32_ZMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_blend_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <description>Blend packed double-precision (64-bit) floating-point elements from "a" and "b" using control mask "k", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := b[i+63:i]
+ ELSE
+ dst[i+63:i] := a[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VBLENDMPD" form="zmm {k}, zmm, zmm" xed="VBLENDMPD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_blend_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <description>Blend packed single-precision (32-bit) floating-point elements from "a" and "b" using control mask "k", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := b[i+31:i]
+ ELSE
+ dst[i+31:i] := a[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VBLENDMPS" form="zmm {k}, zmm, zmm" xed="VBLENDMPS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_cmp_pd_mask">
+ <type>Floating Point</type>
+ <type>Mask</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immtype="_CMP_"/>
+ <description>Compare packed double-precision (64-bit) floating-point elements in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k".</description>
+ <operation>CASE (imm8[4:0]) OF
+0: OP := _CMP_EQ_OQ
+1: OP := _CMP_LT_OS
+2: OP := _CMP_LE_OS
+3: OP := _CMP_UNORD_Q
+4: OP := _CMP_NEQ_UQ
+5: OP := _CMP_NLT_US
+6: OP := _CMP_NLE_US
+7: OP := _CMP_ORD_Q
+8: OP := _CMP_EQ_UQ
+9: OP := _CMP_NGE_US
+10: OP := _CMP_NGT_US
+11: OP := _CMP_FALSE_OQ
+12: OP := _CMP_NEQ_OQ
+13: OP := _CMP_GE_OS
+14: OP := _CMP_GT_OS
+15: OP := _CMP_TRUE_UQ
+16: OP := _CMP_EQ_OS
+17: OP := _CMP_LT_OQ
+18: OP := _CMP_LE_OQ
+19: OP := _CMP_UNORD_S
+20: OP := _CMP_NEQ_US
+21: OP := _CMP_NLT_UQ
+22: OP := _CMP_NLE_UQ
+23: OP := _CMP_ORD_S
+24: OP := _CMP_EQ_US
+25: OP := _CMP_NGE_UQ
+26: OP := _CMP_NGT_UQ
+27: OP := _CMP_FALSE_OS
+28: OP := _CMP_NEQ_OS
+29: OP := _CMP_GE_OQ
+30: OP := _CMP_GT_OQ
+31: OP := _CMP_TRUE_US
+ESAC
+FOR j := 0 to 7
+ i := j*64
+ k[j] := (a[i+63:i] OP b[i+63:i]) ? 1 : 0
+ENDFOR
+k[MAX:8] := 0
+ </operation>
+ <instruction name="VCMPPD" form="k, zmm, zmm, imm8" xed="VCMPPD_MASKmskw_MASKmskw_ZMMf64_ZMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_cmp_round_pd_mask">
+ <type>Floating Point</type>
+ <type>Mask</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immtype="_CMP_"/>
+ <parameter type="const int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Compare packed double-precision (64-bit) floating-point elements in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k". [sae_note]</description>
+ <operation>CASE (imm8[4:0]) OF
+0: OP := _CMP_EQ_OQ
+1: OP := _CMP_LT_OS
+2: OP := _CMP_LE_OS
+3: OP := _CMP_UNORD_Q
+4: OP := _CMP_NEQ_UQ
+5: OP := _CMP_NLT_US
+6: OP := _CMP_NLE_US
+7: OP := _CMP_ORD_Q
+8: OP := _CMP_EQ_UQ
+9: OP := _CMP_NGE_US
+10: OP := _CMP_NGT_US
+11: OP := _CMP_FALSE_OQ
+12: OP := _CMP_NEQ_OQ
+13: OP := _CMP_GE_OS
+14: OP := _CMP_GT_OS
+15: OP := _CMP_TRUE_UQ
+16: OP := _CMP_EQ_OS
+17: OP := _CMP_LT_OQ
+18: OP := _CMP_LE_OQ
+19: OP := _CMP_UNORD_S
+20: OP := _CMP_NEQ_US
+21: OP := _CMP_NLT_UQ
+22: OP := _CMP_NLE_UQ
+23: OP := _CMP_ORD_S
+24: OP := _CMP_EQ_US
+25: OP := _CMP_NGE_UQ
+26: OP := _CMP_NGT_UQ
+27: OP := _CMP_FALSE_OS
+28: OP := _CMP_NEQ_OS
+29: OP := _CMP_GE_OQ
+30: OP := _CMP_GT_OQ
+31: OP := _CMP_TRUE_US
+ESAC
+FOR j := 0 to 7
+ i := j*64
+ k[j] := (a[i+63:i] OP b[i+63:i]) ? 1 : 0
+ENDFOR
+k[MAX:8] := 0
+ </operation>
+ <instruction name="VCMPPD" form="k, zmm, zmm {sae}, imm8" xed="VCMPPD_MASKmskw_MASKmskw_ZMMf64_ZMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_cmpeq_pd_mask">
+ <type>Floating Point</type>
+ <type>Mask</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <description>Compare packed double-precision (64-bit) floating-point elements in "a" and "b" for equality, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ k[j] := (a[i+63:i] == b[i+63:i]) ? 1 : 0
+ENDFOR
+k[MAX:8] := 0
+ </operation>
+ <instruction name="VCMPPD" form="k, zmm, zmm, imm8" xed="VCMPPD_MASKmskw_MASKmskw_ZMMf64_ZMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_cmple_pd_mask">
+ <type>Floating Point</type>
+ <type>Mask</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <description>Compare packed double-precision (64-bit) floating-point elements in "a" and "b" for less-than-or-equal, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ k[j] := (a[i+63:i] &lt;= b[i+63:i]) ? 1 : 0
+ENDFOR
+k[MAX:8] := 0
+ </operation>
+ <instruction name="VCMPPD" form="k, zmm, zmm, imm8" xed="VCMPPD_MASKmskw_MASKmskw_ZMMf64_ZMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_cmplt_pd_mask">
+ <type>Floating Point</type>
+ <type>Mask</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <description>Compare packed double-precision (64-bit) floating-point elements in "a" and "b" for less-than, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ k[j] := (a[i+63:i] &lt; b[i+63:i]) ? 1 : 0
+ENDFOR
+k[MAX:8] := 0
+ </operation>
+ <instruction name="VCMPPD" form="k, zmm, zmm, imm8" xed="VCMPPD_MASKmskw_MASKmskw_ZMMf64_ZMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_cmpneq_pd_mask">
+ <type>Floating Point</type>
+ <type>Mask</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <description>Compare packed double-precision (64-bit) floating-point elements in "a" and "b" for not-equal, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ k[j] := (a[i+63:i] != b[i+63:i]) ? 1 : 0
+ENDFOR
+k[MAX:8] := 0
+ </operation>
+ <instruction name="VCMPPD" form="k, zmm, zmm, imm8" xed="VCMPPD_MASKmskw_MASKmskw_ZMMf64_ZMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_cmpnle_pd_mask">
+ <type>Floating Point</type>
+ <type>Mask</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <description>Compare packed double-precision (64-bit) floating-point elements in "a" and "b" for not-less-than-or-equal, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ k[j] := (!(a[i+63:i] &lt;= b[i+63:i])) ? 1 : 0
+ENDFOR
+k[MAX:8] := 0
+ </operation>
+ <instruction name="VCMPPD" form="k, zmm, zmm, imm8" xed="VCMPPD_MASKmskw_MASKmskw_ZMMf64_ZMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_cmpnlt_pd_mask">
+ <type>Floating Point</type>
+ <type>Mask</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <description>Compare packed double-precision (64-bit) floating-point elements in "a" and "b" for not-less-than, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ k[j] := (!(a[i+63:i] &lt; b[i+63:i])) ? 1 : 0
+ENDFOR
+k[MAX:8] := 0
+ </operation>
+ <instruction name="VCMPPD" form="k, zmm, zmm, imm8" xed="VCMPPD_MASKmskw_MASKmskw_ZMMf64_ZMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_cmpord_pd_mask">
+ <type>Floating Point</type>
+ <type>Mask</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <description>Compare packed double-precision (64-bit) floating-point elements in "a" and "b" to see if neither is NaN, and store the results in mask vector "k".</description>
+ <operation>FOR j := 0 to 7
+ i := j*64
+ k[j] := (a[i+63:i] != NaN AND b[i+63:i] != NaN) ? 1 : 0
+ENDFOR
+k[MAX:8] := 0
+ </operation>
+ <instruction name="VCMPPD" form="k, zmm, zmm, imm8" xed="VCMPPD_MASKmskw_MASKmskw_ZMMf64_ZMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_cmpunord_pd_mask">
+ <type>Floating Point</type>
+ <type>Mask</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <description>Compare packed double-precision (64-bit) floating-point elements in "a" and "b" to see if either is NaN, and store the results in mask vector "k".</description>
+ <operation>FOR j := 0 to 7
+ i := j*64
+ k[j] := (a[i+63:i] == NaN OR b[i+63:i] == NaN) ? 1 : 0
+ENDFOR
+k[MAX:8] := 0
+ </operation>
+ <instruction name="VCMPPD" form="k, zmm, zmm, imm8" xed="VCMPPD_MASKmskw_MASKmskw_ZMMf64_ZMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_cmp_pd_mask">
+ <type>Floating Point</type>
+ <type>Mask</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__mmask8" varname="k1" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immtype="_CMP_"/>
+ <description>Compare packed double-precision (64-bit) floating-point elements in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>CASE (imm8[4:0]) OF
+0: OP := _CMP_EQ_OQ
+1: OP := _CMP_LT_OS
+2: OP := _CMP_LE_OS
+3: OP := _CMP_UNORD_Q
+4: OP := _CMP_NEQ_UQ
+5: OP := _CMP_NLT_US
+6: OP := _CMP_NLE_US
+7: OP := _CMP_ORD_Q
+8: OP := _CMP_EQ_UQ
+9: OP := _CMP_NGE_US
+10: OP := _CMP_NGT_US
+11: OP := _CMP_FALSE_OQ
+12: OP := _CMP_NEQ_OQ
+13: OP := _CMP_GE_OS
+14: OP := _CMP_GT_OS
+15: OP := _CMP_TRUE_UQ
+16: OP := _CMP_EQ_OS
+17: OP := _CMP_LT_OQ
+18: OP := _CMP_LE_OQ
+19: OP := _CMP_UNORD_S
+20: OP := _CMP_NEQ_US
+21: OP := _CMP_NLT_UQ
+22: OP := _CMP_NLE_UQ
+23: OP := _CMP_ORD_S
+24: OP := _CMP_EQ_US
+25: OP := _CMP_NGE_UQ
+26: OP := _CMP_NGT_UQ
+27: OP := _CMP_FALSE_OS
+28: OP := _CMP_NEQ_OS
+29: OP := _CMP_GE_OQ
+30: OP := _CMP_GT_OQ
+31: OP := _CMP_TRUE_US
+ESAC
+FOR j := 0 to 7
+ i := j*64
+ IF k1[j]
+ k[j] := ( a[i+63:i] OP b[i+63:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:8] := 0
+ </operation>
+ <instruction name="VCMPPD" form="k {k}, zmm, zmm, imm8" xed="VCMPPD_MASKmskw_MASKmskw_ZMMf64_ZMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_cmp_round_pd_mask">
+ <type>Floating Point</type>
+ <type>Mask</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__mmask8" varname="k1" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immtype="_CMP_"/>
+ <parameter type="const int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Compare packed double-precision (64-bit) floating-point elements in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). [sae_note]</description>
+ <operation>CASE (imm8[4:0]) OF
+0: OP := _CMP_EQ_OQ
+1: OP := _CMP_LT_OS
+2: OP := _CMP_LE_OS
+3: OP := _CMP_UNORD_Q
+4: OP := _CMP_NEQ_UQ
+5: OP := _CMP_NLT_US
+6: OP := _CMP_NLE_US
+7: OP := _CMP_ORD_Q
+8: OP := _CMP_EQ_UQ
+9: OP := _CMP_NGE_US
+10: OP := _CMP_NGT_US
+11: OP := _CMP_FALSE_OQ
+12: OP := _CMP_NEQ_OQ
+13: OP := _CMP_GE_OS
+14: OP := _CMP_GT_OS
+15: OP := _CMP_TRUE_UQ
+16: OP := _CMP_EQ_OS
+17: OP := _CMP_LT_OQ
+18: OP := _CMP_LE_OQ
+19: OP := _CMP_UNORD_S
+20: OP := _CMP_NEQ_US
+21: OP := _CMP_NLT_UQ
+22: OP := _CMP_NLE_UQ
+23: OP := _CMP_ORD_S
+24: OP := _CMP_EQ_US
+25: OP := _CMP_NGE_UQ
+26: OP := _CMP_NGT_UQ
+27: OP := _CMP_FALSE_OS
+28: OP := _CMP_NEQ_OS
+29: OP := _CMP_GE_OQ
+30: OP := _CMP_GT_OQ
+31: OP := _CMP_TRUE_US
+ESAC
+FOR j := 0 to 7
+ i := j*64
+ IF k1[j]
+ k[j] := ( a[i+63:i] OP b[i+63:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:8] := 0
+ </operation>
+ <instruction name="VCMPPD" form="k {k}, zmm, zmm {sae}, imm8" xed="VCMPPD_MASKmskw_MASKmskw_ZMMf64_ZMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_cmpeq_pd_mask">
+ <type>Floating Point</type>
+ <type>Mask</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__mmask8" varname="k1" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <description>Compare packed double-precision (64-bit) floating-point elements in "a" and "b" for equality, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k1[j]
+ k[j] := (a[i+63:i] == b[i+63:i]) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:8] := 0
+ </operation>
+ <instruction name="VCMPPD" form="k {k}, zmm, zmm, imm8" xed="VCMPPD_MASKmskw_MASKmskw_ZMMf64_ZMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_cmple_pd_mask">
+ <type>Floating Point</type>
+ <type>Mask</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__mmask8" varname="k1" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <description>Compare packed double-precision (64-bit) floating-point elements in "a" and "b" for less-than-or-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k1[j]
+ k[j] := (a[i+63:i] &lt;= b[i+63:i]) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:8] := 0
+ </operation>
+ <instruction name="VCMPPD" form="k {k}, zmm, zmm, imm8" xed="VCMPPD_MASKmskw_MASKmskw_ZMMf64_ZMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_cmplt_pd_mask">
+ <type>Floating Point</type>
+ <type>Mask</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__mmask8" varname="k1" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <description>Compare packed double-precision (64-bit) floating-point elements in "a" and "b" for less-than, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k1[j]
+ k[j] := (a[i+63:i] &lt; b[i+63:i]) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:8] := 0
+ </operation>
+ <instruction name="VCMPPD" form="k {k}, zmm, zmm, imm8" xed="VCMPPD_MASKmskw_MASKmskw_ZMMf64_ZMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_cmpneq_pd_mask">
+ <type>Floating Point</type>
+ <type>Mask</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__mmask8" varname="k1" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <description>Compare packed double-precision (64-bit) floating-point elements in "a" and "b" for not-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k1[j]
+ k[j] := (a[i+63:i] != b[i+63:i]) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:8] := 0
+ </operation>
+ <instruction name="VCMPPD" form="k {k}, zmm, zmm, imm8" xed="VCMPPD_MASKmskw_MASKmskw_ZMMf64_ZMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_cmpnle_pd_mask">
+ <type>Floating Point</type>
+ <type>Mask</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__mmask8" varname="k1" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <description>Compare packed double-precision (64-bit) floating-point elements in "a" and "b" for not-less-than-or-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k1[j]
+ k[j] := (!(a[i+63:i] &lt;= b[i+63:i])) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:8] := 0
+ </operation>
+ <instruction name="VCMPPD" form="k {k}, zmm, zmm, imm8" xed="VCMPPD_MASKmskw_MASKmskw_ZMMf64_ZMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_cmpnlt_pd_mask">
+ <type>Floating Point</type>
+ <type>Mask</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__mmask8" varname="k1" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <description>Compare packed double-precision (64-bit) floating-point elements in "a" and "b" for not-less-than, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k1[j]
+ k[j] := (!(a[i+63:i] &lt; b[i+63:i])) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:8] := 0
+ </operation>
+ <instruction name="VCMPPD" form="k {k}, zmm, zmm, imm8" xed="VCMPPD_MASKmskw_MASKmskw_ZMMf64_ZMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_cmpord_pd_mask">
+ <type>Floating Point</type>
+ <type>Mask</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__mmask8" varname="k1" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <description>Compare packed double-precision (64-bit) floating-point elements in "a" and "b" to see if neither is NaN, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>FOR j := 0 to 7
+ i := j*64
+ IF k1[j]
+ k[j] := (a[i+63:i] != NaN AND b[i+63:i] != NaN) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:8] := 0
+ </operation>
+ <instruction name="VCMPPD" form="k {k}, zmm, zmm, imm8" xed="VCMPPD_MASKmskw_MASKmskw_ZMMf64_ZMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_cmpunord_pd_mask">
+ <type>Floating Point</type>
+ <type>Mask</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Compare</category>
+ <return type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__mmask8" varname="k1" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <description>Compare packed double-precision (64-bit) floating-point elements in "a" and "b" to see if either is NaN, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>FOR j := 0 to 7
+ i := j*64
+ IF k1[j]
+ k[j] := (a[i+63:i] == NaN OR b[i+63:i] == NaN) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:8] := 0
+ </operation>
+ <instruction name="VCMPPD" form="k {k}, zmm, zmm, imm8" xed="VCMPPD_MASKmskw_MASKmskw_ZMMf64_ZMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_cmp_ps_mask">
+ <type>Floating Point</type>
+ <type>Mask</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Compare</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immtype="_CMP_"/>
+ <description>Compare packed single-precision (32-bit) floating-point elements in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k".</description>
+ <operation>CASE (imm8[4:0]) OF
+0: OP := _CMP_EQ_OQ
+1: OP := _CMP_LT_OS
+2: OP := _CMP_LE_OS
+3: OP := _CMP_UNORD_Q
+4: OP := _CMP_NEQ_UQ
+5: OP := _CMP_NLT_US
+6: OP := _CMP_NLE_US
+7: OP := _CMP_ORD_Q
+8: OP := _CMP_EQ_UQ
+9: OP := _CMP_NGE_US
+10: OP := _CMP_NGT_US
+11: OP := _CMP_FALSE_OQ
+12: OP := _CMP_NEQ_OQ
+13: OP := _CMP_GE_OS
+14: OP := _CMP_GT_OS
+15: OP := _CMP_TRUE_UQ
+16: OP := _CMP_EQ_OS
+17: OP := _CMP_LT_OQ
+18: OP := _CMP_LE_OQ
+19: OP := _CMP_UNORD_S
+20: OP := _CMP_NEQ_US
+21: OP := _CMP_NLT_UQ
+22: OP := _CMP_NLE_UQ
+23: OP := _CMP_ORD_S
+24: OP := _CMP_EQ_US
+25: OP := _CMP_NGE_UQ
+26: OP := _CMP_NGT_UQ
+27: OP := _CMP_FALSE_OS
+28: OP := _CMP_NEQ_OS
+29: OP := _CMP_GE_OQ
+30: OP := _CMP_GT_OQ
+31: OP := _CMP_TRUE_US
+ESAC
+FOR j := 0 to 15
+ i := j*32
+ k[j] := (a[i+31:i] OP b[i+31:i]) ? 1 : 0
+ENDFOR
+k[MAX:16] := 0
+ </operation>
+ <instruction name="VCMPPS" form="k, zmm, zmm, imm8" xed="VCMPPS_MASKmskw_MASKmskw_ZMMf32_ZMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_cmp_round_ps_mask">
+ <type>Floating Point</type>
+ <type>Mask</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Compare</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immtype="_CMP_"/>
+ <parameter type="const int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Compare packed single-precision (32-bit) floating-point elements in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k". [sae_note]</description>
+ <operation>CASE (imm8[4:0]) OF
+0: OP := _CMP_EQ_OQ
+1: OP := _CMP_LT_OS
+2: OP := _CMP_LE_OS
+3: OP := _CMP_UNORD_Q
+4: OP := _CMP_NEQ_UQ
+5: OP := _CMP_NLT_US
+6: OP := _CMP_NLE_US
+7: OP := _CMP_ORD_Q
+8: OP := _CMP_EQ_UQ
+9: OP := _CMP_NGE_US
+10: OP := _CMP_NGT_US
+11: OP := _CMP_FALSE_OQ
+12: OP := _CMP_NEQ_OQ
+13: OP := _CMP_GE_OS
+14: OP := _CMP_GT_OS
+15: OP := _CMP_TRUE_UQ
+16: OP := _CMP_EQ_OS
+17: OP := _CMP_LT_OQ
+18: OP := _CMP_LE_OQ
+19: OP := _CMP_UNORD_S
+20: OP := _CMP_NEQ_US
+21: OP := _CMP_NLT_UQ
+22: OP := _CMP_NLE_UQ
+23: OP := _CMP_ORD_S
+24: OP := _CMP_EQ_US
+25: OP := _CMP_NGE_UQ
+26: OP := _CMP_NGT_UQ
+27: OP := _CMP_FALSE_OS
+28: OP := _CMP_NEQ_OS
+29: OP := _CMP_GE_OQ
+30: OP := _CMP_GT_OQ
+31: OP := _CMP_TRUE_US
+ESAC
+FOR j := 0 to 15
+ i := j*32
+ k[j] := (a[i+31:i] OP b[i+31:i]) ? 1 : 0
+ENDFOR
+k[MAX:16] := 0
+ </operation>
+ <instruction name="VCMPPS" form="k, zmm, zmm {sae}, imm8" xed="VCMPPS_MASKmskw_MASKmskw_ZMMf32_ZMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_cmpeq_ps_mask">
+ <type>Floating Point</type>
+ <type>Mask</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Compare</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <description>Compare packed single-precision (32-bit) floating-point elements in "a" and "b" for equality, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ k[j] := (a[i+31:i] == b[i+31:i]) ? 1 : 0
+ENDFOR
+k[MAX:16] := 0
+ </operation>
+ <instruction name="VCMPPS" form="k, zmm, zmm, imm8" xed="VCMPPS_MASKmskw_MASKmskw_ZMMf32_ZMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_cmple_ps_mask">
+ <type>Floating Point</type>
+ <type>Mask</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Compare</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <description>Compare packed single-precision (32-bit) floating-point elements in "a" and "b" for less-than-or-equal, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ k[j] := (a[i+31:i] &lt;= b[i+31:i]) ? 1 : 0
+ENDFOR
+k[MAX:16] := 0
+ </operation>
+ <instruction name="VCMPPS" form="k, zmm, zmm, imm8" xed="VCMPPS_MASKmskw_MASKmskw_ZMMf32_ZMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_cmplt_ps_mask">
+ <type>Floating Point</type>
+ <type>Mask</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Compare</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <description>Compare packed single-precision (32-bit) floating-point elements in "a" and "b" for less-than, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ k[j] := (a[i+31:i] &lt; b[i+31:i]) ? 1 : 0
+ENDFOR
+k[MAX:16] := 0
+ </operation>
+ <instruction name="VCMPPS" form="k, zmm, zmm, imm8" xed="VCMPPS_MASKmskw_MASKmskw_ZMMf32_ZMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_cmpneq_ps_mask">
+ <type>Floating Point</type>
+ <type>Mask</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Compare</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <description>Compare packed single-precision (32-bit) floating-point elements in "a" and "b" for not-equal, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ k[j] := (a[i+31:i] != b[i+31:i]) ? 1 : 0
+ENDFOR
+k[MAX:16] := 0
+ </operation>
+ <instruction name="VCMPPS" form="k, zmm, zmm, imm8" xed="VCMPPS_MASKmskw_MASKmskw_ZMMf32_ZMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_cmpnle_ps_mask">
+ <type>Floating Point</type>
+ <type>Mask</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Compare</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <description>Compare packed single-precision (32-bit) floating-point elements in "a" and "b" for not-less-than-or-equal, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ k[j] := (!(a[i+31:i] &lt;= b[i+31:i])) ? 1 : 0
+ENDFOR
+k[MAX:16] := 0
+ </operation>
+ <instruction name="VCMPPS" form="k, zmm, zmm, imm8" xed="VCMPPS_MASKmskw_MASKmskw_ZMMf32_ZMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_cmpnlt_ps_mask">
+ <type>Floating Point</type>
+ <type>Mask</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Compare</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <description>Compare packed single-precision (32-bit) floating-point elements in "a" and "b" for not-less-than, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ k[j] := (!(a[i+31:i] &lt; b[i+31:i])) ? 1 : 0
+ENDFOR
+k[MAX:16] := 0
+ </operation>
+ <instruction name="VCMPPS" form="k, zmm, zmm, imm8" xed="VCMPPS_MASKmskw_MASKmskw_ZMMf32_ZMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_cmpord_ps_mask">
+ <type>Floating Point</type>
+ <type>Mask</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Compare</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <description>Compare packed single-precision (32-bit) floating-point elements in "a" and "b" to see if neither is NaN, and store the results in mask vector "k".</description>
+ <operation>FOR j := 0 to 15
+ i := j*32
+ k[j] := ((a[i+31:i] != NaN) AND (b[i+31:i] != NaN)) ? 1 : 0
+ENDFOR
+k[MAX:16] := 0
+ </operation>
+ <instruction name="VCMPPS" form="k, zmm, zmm, imm8" xed="VCMPPS_MASKmskw_MASKmskw_ZMMf32_ZMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_cmpunord_ps_mask">
+ <type>Floating Point</type>
+ <type>Mask</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Compare</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <description>Compare packed single-precision (32-bit) floating-point elements in "a" and "b" to see if either is NaN, and store the results in mask vector "k".</description>
+ <operation>FOR j := 0 to 15
+ i := j*32
+ k[j] := ((a[i+31:i] == NaN) OR (b[i+31:i] == NaN)) ? 1 : 0
+ENDFOR
+k[MAX:16] := 0
+ </operation>
+ <instruction name="VCMPPS" form="k, zmm, zmm, imm8" xed="VCMPPS_MASKmskw_MASKmskw_ZMMf32_ZMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_cmp_ps_mask">
+ <type>Floating Point</type>
+ <type>Mask</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Compare</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__mmask16" varname="k1" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immtype="_CMP_"/>
+ <description>Compare packed single-precision (32-bit) floating-point elements in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>CASE (imm8[4:0]) OF
+0: OP := _CMP_EQ_OQ
+1: OP := _CMP_LT_OS
+2: OP := _CMP_LE_OS
+3: OP := _CMP_UNORD_Q
+4: OP := _CMP_NEQ_UQ
+5: OP := _CMP_NLT_US
+6: OP := _CMP_NLE_US
+7: OP := _CMP_ORD_Q
+8: OP := _CMP_EQ_UQ
+9: OP := _CMP_NGE_US
+10: OP := _CMP_NGT_US
+11: OP := _CMP_FALSE_OQ
+12: OP := _CMP_NEQ_OQ
+13: OP := _CMP_GE_OS
+14: OP := _CMP_GT_OS
+15: OP := _CMP_TRUE_UQ
+16: OP := _CMP_EQ_OS
+17: OP := _CMP_LT_OQ
+18: OP := _CMP_LE_OQ
+19: OP := _CMP_UNORD_S
+20: OP := _CMP_NEQ_US
+21: OP := _CMP_NLT_UQ
+22: OP := _CMP_NLE_UQ
+23: OP := _CMP_ORD_S
+24: OP := _CMP_EQ_US
+25: OP := _CMP_NGE_UQ
+26: OP := _CMP_NGT_UQ
+27: OP := _CMP_FALSE_OS
+28: OP := _CMP_NEQ_OS
+29: OP := _CMP_GE_OQ
+30: OP := _CMP_GT_OQ
+31: OP := _CMP_TRUE_US
+ESAC
+FOR j := 0 to 15
+ i := j*32
+ IF k1[j]
+ k[j] := ( a[i+31:i] OP b[i+31:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:16] := 0
+ </operation>
+ <instruction name="VCMPPS" form="k {k}, zmm, zmm, imm8" xed="VCMPPS_MASKmskw_MASKmskw_ZMMf32_ZMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_cmp_round_ps_mask">
+ <type>Floating Point</type>
+ <type>Mask</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Compare</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__mmask16" varname="k1" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immtype="_CMP_"/>
+ <parameter type="const int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Compare packed single-precision (32-bit) floating-point elements in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set). [sae_note]</description>
+ <operation>CASE (imm8[4:0]) OF
+0: OP := _CMP_EQ_OQ
+1: OP := _CMP_LT_OS
+2: OP := _CMP_LE_OS
+3: OP := _CMP_UNORD_Q
+4: OP := _CMP_NEQ_UQ
+5: OP := _CMP_NLT_US
+6: OP := _CMP_NLE_US
+7: OP := _CMP_ORD_Q
+8: OP := _CMP_EQ_UQ
+9: OP := _CMP_NGE_US
+10: OP := _CMP_NGT_US
+11: OP := _CMP_FALSE_OQ
+12: OP := _CMP_NEQ_OQ
+13: OP := _CMP_GE_OS
+14: OP := _CMP_GT_OS
+15: OP := _CMP_TRUE_UQ
+16: OP := _CMP_EQ_OS
+17: OP := _CMP_LT_OQ
+18: OP := _CMP_LE_OQ
+19: OP := _CMP_UNORD_S
+20: OP := _CMP_NEQ_US
+21: OP := _CMP_NLT_UQ
+22: OP := _CMP_NLE_UQ
+23: OP := _CMP_ORD_S
+24: OP := _CMP_EQ_US
+25: OP := _CMP_NGE_UQ
+26: OP := _CMP_NGT_UQ
+27: OP := _CMP_FALSE_OS
+28: OP := _CMP_NEQ_OS
+29: OP := _CMP_GE_OQ
+30: OP := _CMP_GT_OQ
+31: OP := _CMP_TRUE_US
+ESAC
+FOR j := 0 to 15
+ i := j*32
+ IF k1[j]
+ k[j] := ( a[i+31:i] OP b[i+31:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:16] := 0
+ </operation>
+ <instruction name="VCMPPS" form="k {k}, zmm, zmm {sae}, imm8" xed="VCMPPS_MASKmskw_MASKmskw_ZMMf32_ZMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_cmpeq_ps_mask">
+ <type>Floating Point</type>
+ <type>Mask</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Compare</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__mmask16" varname="k1" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <description>Compare packed single-precision (32-bit) floating-point elements in "a" and "b" for equality, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k1[j]
+ k[j] := (a[i+31:i] == b[i+31:i]) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:16] := 0
+ </operation>
+ <instruction name="VCMPPS" form="k {k}, zmm, zmm, imm8" xed="VCMPPS_MASKmskw_MASKmskw_ZMMf32_ZMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_cmple_ps_mask">
+ <type>Floating Point</type>
+ <type>Mask</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Compare</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__mmask16" varname="k1" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <description>Compare packed single-precision (32-bit) floating-point elements in "a" and "b" for less-than-or-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k1[j]
+ k[j] := (a[i+31:i] &lt;= b[i+31:i]) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:16] := 0
+ </operation>
+ <instruction name="VCMPPS" form="k {k}, zmm, zmm, imm8" xed="VCMPPS_MASKmskw_MASKmskw_ZMMf32_ZMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_cmplt_ps_mask">
+ <type>Floating Point</type>
+ <type>Mask</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Compare</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__mmask16" varname="k1" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <description>Compare packed single-precision (32-bit) floating-point elements in "a" and "b" for less-than, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k1[j]
+ k[j] := (a[i+31:i] &lt; b[i+31:i]) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:16] := 0
+ </operation>
+ <instruction name="VCMPPS" form="k {k}, zmm, zmm, imm8" xed="VCMPPS_MASKmskw_MASKmskw_ZMMf32_ZMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_cmpneq_ps_mask">
+ <type>Floating Point</type>
+ <type>Mask</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Compare</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__mmask16" varname="k1" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <description>Compare packed single-precision (32-bit) floating-point elements in "a" and "b" for not-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k1[j]
+ k[j] := (a[i+31:i] != b[i+31:i]) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:16] := 0
+ </operation>
+ <instruction name="VCMPPS" form="k {k}, zmm, zmm, imm8" xed="VCMPPS_MASKmskw_MASKmskw_ZMMf32_ZMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_cmpnle_ps_mask">
+ <type>Floating Point</type>
+ <type>Mask</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Compare</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__mmask16" varname="k1" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <description>Compare packed single-precision (32-bit) floating-point elements in "a" and "b" for not-less-than-or-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k1[j]
+ k[j] := (!(a[i+31:i] &lt;= b[i+31:i])) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:16] := 0
+ </operation>
+ <instruction name="VCMPPS" form="k {k}, zmm, zmm, imm8" xed="VCMPPS_MASKmskw_MASKmskw_ZMMf32_ZMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_cmpnlt_ps_mask">
+ <type>Floating Point</type>
+ <type>Mask</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Compare</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__mmask16" varname="k1" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <description>Compare packed single-precision (32-bit) floating-point elements in "a" and "b" for not-less-than, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k1[j]
+ k[j] := (!(a[i+31:i] &lt; b[i+31:i])) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:16] := 0
+ </operation>
+ <instruction name="VCMPPS" form="k {k}, zmm, zmm, imm8" xed="VCMPPS_MASKmskw_MASKmskw_ZMMf32_ZMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_cmpord_ps_mask">
+ <type>Floating Point</type>
+ <type>Mask</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Compare</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__mmask16" varname="k1" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <description>Compare packed single-precision (32-bit) floating-point elements in "a" and "b" to see if neither is NaN, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>FOR j := 0 to 15
+ i := j*32
+ IF k1[j]
+ k[j] := ((a[i+31:i] != NaN) AND (b[i+31:i] != NaN)) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:16] := 0
+ </operation>
+ <instruction name="VCMPPS" form="k {k}, zmm, zmm, imm8" xed="VCMPPS_MASKmskw_MASKmskw_ZMMf32_ZMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_cmpunord_ps_mask">
+ <type>Floating Point</type>
+ <type>Mask</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Compare</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__mmask16" varname="k1" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <description>Compare packed single-precision (32-bit) floating-point elements in "a" and "b" to see if either is NaN, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>FOR j := 0 to 15
+ i := j*32
+ IF k1[j]
+ k[j] := ((a[i+31:i] == NaN) OR (b[i+31:i] == NaN)) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:16] := 0
+ </operation>
+ <instruction name="VCMPPS" form="k {k}, zmm, zmm, imm8" xed="VCMPPS_MASKmskw_MASKmskw_ZMMf32_ZMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_fmadd_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <parameter type="__m512d" varname="c" etype="FP64"/>
+ <description>Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", add the intermediate result to packed elements in "c", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i]
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VFMADD132PD" form="zmm, zmm, zmm" xed="VFMADD132PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <instruction name="VFMADD213PD" form="zmm, zmm, zmm" xed="VFMADD213PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <instruction name="VFMADD231PD" form="zmm, zmm, zmm" xed="VFMADD231PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_fmadd_round_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <parameter type="__m512d" varname="c" etype="FP64"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", add the intermediate result to packed elements in "c", and store the results in "dst".
+ [round_note]</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i]
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VFMADD132PD" form="zmm, zmm, zmm {er}" xed="VFMADD132PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <instruction name="VFMADD213PD" form="zmm, zmm, zmm {er}" xed="VFMADD213PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <instruction name="VFMADD231PD" form="zmm, zmm, zmm {er}" xed="VFMADD231PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask3_fmadd_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <parameter type="__m512d" varname="c" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <description>Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", add the intermediate result to packed elements in "c", and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i]
+ ELSE
+ dst[i+63:i] := c[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VFMADD132PD" form="zmm {k}, zmm, zmm" xed="VFMADD132PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <instruction name="VFMADD213PD" form="zmm {k}, zmm, zmm" xed="VFMADD213PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <instruction name="VFMADD231PD" form="zmm {k}, zmm, zmm" xed="VFMADD231PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask3_fmadd_round_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <parameter type="__m512d" varname="c" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", add the intermediate result to packed elements in "c", and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set).
+ [round_note]</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i]
+ ELSE
+ dst[i+63:i] := c[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VFMADD132PD" form="zmm {k}, zmm, zmm {er}" xed="VFMADD132PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <instruction name="VFMADD213PD" form="zmm {k}, zmm, zmm {er}" xed="VFMADD213PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <instruction name="VFMADD231PD" form="zmm {k}, zmm, zmm {er}" xed="VFMADD231PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_fmadd_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <parameter type="__m512d" varname="c" etype="FP64"/>
+ <description>Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", add the intermediate result to packed elements in "c", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i]
+ ELSE
+ dst[i+63:i] := a[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VFMADD132PD" form="zmm {k}, zmm, zmm" xed="VFMADD132PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <instruction name="VFMADD213PD" form="zmm {k}, zmm, zmm" xed="VFMADD213PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <instruction name="VFMADD231PD" form="zmm {k}, zmm, zmm" xed="VFMADD231PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_fmadd_round_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <parameter type="__m512d" varname="c" etype="FP64"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", add the intermediate result to packed elements in "c", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).
+ [round_note]</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i]
+ ELSE
+ dst[i+63:i] := a[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VFMADD132PD" form="zmm {k}, zmm, zmm {er}" xed="VFMADD132PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <instruction name="VFMADD213PD" form="zmm {k}, zmm, zmm {er}" xed="VFMADD213PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <instruction name="VFMADD231PD" form="zmm {k}, zmm, zmm {er}" xed="VFMADD231PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_fmadd_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <parameter type="__m512" varname="c" etype="FP32"/>
+ <description>Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", add the intermediate result to packed elements in "c", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i]
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VFMADD132PS" form="zmm, zmm, zmm" xed="VFMADD132PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <instruction name="VFMADD213PS" form="zmm, zmm, zmm" xed="VFMADD213PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <instruction name="VFMADD231PS" form="zmm, zmm, zmm" xed="VFMADD231PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_fmadd_round_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <parameter type="__m512" varname="c" etype="FP32"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", add the intermediate result to packed elements in "c", and store the results in "dst".
+ [round_note]</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i]
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VFMADD132PS" form="zmm, zmm, zmm {er}" xed="VFMADD132PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <instruction name="VFMADD213PS" form="zmm, zmm, zmm {er}" xed="VFMADD213PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <instruction name="VFMADD231PS" form="zmm, zmm, zmm {er}" xed="VFMADD231PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask3_fmadd_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <parameter type="__m512" varname="c" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <description>Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", add the intermediate result to packed elements in "c", and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i]
+ ELSE
+ dst[i+31:i] := c[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VFMADD132PS" form="zmm {k}, zmm, zmm" xed="VFMADD132PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <instruction name="VFMADD213PS" form="zmm {k}, zmm, zmm" xed="VFMADD213PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <instruction name="VFMADD231PS" form="zmm {k}, zmm, zmm" xed="VFMADD231PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask3_fmadd_round_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <parameter type="__m512" varname="c" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", add the intermediate result to packed elements in "c", and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set).
+ [round_note]</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i]
+ ELSE
+ dst[i+31:i] := c[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VFMADD132PS" form="zmm {k}, zmm, zmm {er}" xed="VFMADD132PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <instruction name="VFMADD213PS" form="zmm {k}, zmm, zmm {er}" xed="VFMADD213PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <instruction name="VFMADD231PS" form="zmm {k}, zmm, zmm {er}" xed="VFMADD231PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_fmadd_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <parameter type="__m512" varname="c" etype="FP32"/>
+ <description>Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", add the intermediate result to packed elements in "c", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i]
+ ELSE
+ dst[i+31:i] := a[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VFMADD132PS" form="zmm {k}, zmm, zmm" xed="VFMADD132PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <instruction name="VFMADD213PS" form="zmm {k}, zmm, zmm" xed="VFMADD213PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <instruction name="VFMADD231PS" form="zmm {k}, zmm, zmm" xed="VFMADD231PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_fmadd_round_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <parameter type="__m512" varname="c" etype="FP32"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", add the intermediate result to packed elements in "c", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).
+ [round_note]</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i]
+ ELSE
+ dst[i+31:i] := a[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VFMADD132PS" form="zmm {k}, zmm, zmm {er}" xed="VFMADD132PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <instruction name="VFMADD213PS" form="zmm {k}, zmm, zmm {er}" xed="VFMADD213PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <instruction name="VFMADD231PS" form="zmm {k}, zmm, zmm {er}" xed="VFMADD231PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_fmsub_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <parameter type="__m512d" varname="c" etype="FP64"/>
+ <description>Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the intermediate result, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i]
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VFMSUB132PD" form="zmm, zmm, zmm" xed="VFMSUB132PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <instruction name="VFMSUB213PD" form="zmm, zmm, zmm" xed="VFMSUB213PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <instruction name="VFMSUB231PD" form="zmm, zmm, zmm" xed="VFMSUB231PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_fmsub_round_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <parameter type="__m512d" varname="c" etype="FP64"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the intermediate result, and store the results in "dst".
+ [round_note]</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i]
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VFMSUB132PD" form="zmm, zmm, zmm {er}" xed="VFMSUB132PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <instruction name="VFMSUB213PD" form="zmm, zmm, zmm {er}" xed="VFMSUB213PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <instruction name="VFMSUB231PD" form="zmm, zmm, zmm {er}" xed="VFMSUB231PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask3_fmsub_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <parameter type="__m512d" varname="c" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <description>Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i]
+ ELSE
+ dst[i+63:i] := c[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VFMSUB132PD" form="zmm {k}, zmm, zmm" xed="VFMSUB132PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <instruction name="VFMSUB213PD" form="zmm {k}, zmm, zmm" xed="VFMSUB213PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <instruction name="VFMSUB231PD" form="zmm {k}, zmm, zmm" xed="VFMSUB231PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask3_fmsub_round_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <parameter type="__m512d" varname="c" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set). [round_note]</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i]
+ ELSE
+ dst[i+63:i] := c[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VFMSUB132PD" form="zmm {k}, zmm, zmm {er}" xed="VFMSUB132PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <instruction name="VFMSUB213PD" form="zmm {k}, zmm, zmm {er}" xed="VFMSUB213PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <instruction name="VFMSUB231PD" form="zmm {k}, zmm, zmm {er}" xed="VFMSUB231PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_fmsub_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <parameter type="__m512d" varname="c" etype="FP64"/>
+ <description>Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i]
+ ELSE
+ dst[i+63:i] := a[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VFMSUB132PD" form="zmm {k}, zmm, zmm" xed="VFMSUB132PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <instruction name="VFMSUB213PD" form="zmm {k}, zmm, zmm" xed="VFMSUB213PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <instruction name="VFMSUB231PD" form="zmm {k}, zmm, zmm" xed="VFMSUB231PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_fmsub_round_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <parameter type="__m512d" varname="c" etype="FP64"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). [round_note]</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i]
+ ELSE
+ dst[i+63:i] := a[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VFMSUB132PD" form="zmm {k}, zmm, zmm {er}" xed="VFMSUB132PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <instruction name="VFMSUB213PD" form="zmm {k}, zmm, zmm {er}" xed="VFMSUB213PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <instruction name="VFMSUB231PD" form="zmm {k}, zmm, zmm {er}" xed="VFMSUB231PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_fmsub_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <parameter type="__m512" varname="c" etype="FP32"/>
+ <description>Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the intermediate result, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i]
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VFMSUB132PS" form="zmm, zmm, zmm" xed="VFMSUB132PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <instruction name="VFMSUB213PS" form="zmm, zmm, zmm" xed="VFMSUB213PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <instruction name="VFMSUB231PS" form="zmm, zmm, zmm" xed="VFMSUB231PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_fmsub_round_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <parameter type="__m512" varname="c" etype="FP32"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the intermediate result, and store the results in "dst".
+ [round_note]</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i]
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VFMSUB132PS" form="zmm, zmm, zmm {er}" xed="VFMSUB132PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <instruction name="VFMSUB213PS" form="zmm, zmm, zmm {er}" xed="VFMSUB213PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <instruction name="VFMSUB231PS" form="zmm, zmm, zmm {er}" xed="VFMSUB231PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask3_fmsub_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <parameter type="__m512" varname="c" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <description>Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i]
+ ELSE
+ dst[i+31:i] := c[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VFMSUB132PS" form="zmm {k}, zmm, zmm" xed="VFMSUB132PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <instruction name="VFMSUB213PS" form="zmm {k}, zmm, zmm" xed="VFMSUB213PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <instruction name="VFMSUB231PS" form="zmm {k}, zmm, zmm" xed="VFMSUB231PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask3_fmsub_round_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <parameter type="__m512" varname="c" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set). [round_note]</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i]
+ ELSE
+ dst[i+31:i] := c[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VFMSUB132PS" form="zmm {k}, zmm, zmm {er}" xed="VFMSUB132PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <instruction name="VFMSUB213PS" form="zmm {k}, zmm, zmm {er}" xed="VFMSUB213PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <instruction name="VFMSUB231PS" form="zmm {k}, zmm, zmm {er}" xed="VFMSUB231PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_fmsub_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <parameter type="__m512" varname="c" etype="FP32"/>
+ <description>Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i]
+ ELSE
+ dst[i+31:i] := a[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VFMSUB132PS" form="zmm {k}, zmm, zmm" xed="VFMSUB132PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <instruction name="VFMSUB213PS" form="zmm {k}, zmm, zmm" xed="VFMSUB213PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <instruction name="VFMSUB231PS" form="zmm {k}, zmm, zmm" xed="VFMSUB231PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_fmsub_round_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <parameter type="__m512" varname="c" etype="FP32"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). [round_note]</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i]
+ ELSE
+ dst[i+31:i] := a[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VFMSUB132PS" form="zmm {k}, zmm, zmm {er}" xed="VFMSUB132PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <instruction name="VFMSUB213PS" form="zmm {k}, zmm, zmm {er}" xed="VFMSUB213PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <instruction name="VFMSUB231PS" form="zmm {k}, zmm, zmm {er}" xed="VFMSUB231PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_fnmadd_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <parameter type="__m512d" varname="c" etype="FP64"/>
+ <description>Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", add the negated intermediate result to packed elements in "c", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ dst[i+63:i] := -(a[i+63:i] * b[i+63:i]) + c[i+63:i]
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VFNMADD132PD" form="zmm, zmm, zmm" xed="VFNMADD132PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <instruction name="VFNMADD213PD" form="zmm, zmm, zmm" xed="VFNMADD213PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <instruction name="VFNMADD231PD" form="zmm, zmm, zmm" xed="VFNMADD231PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_fnmadd_round_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <parameter type="__m512d" varname="c" etype="FP64"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", add the negated intermediate result to packed elements in "c", and store the results in "dst".
+ [round_note]</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ dst[i+63:i] := -(a[i+63:i] * b[i+63:i]) + c[i+63:i]
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VFNMADD132PD" form="zmm, zmm, zmm {er}" xed="VFNMADD132PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <instruction name="VFNMADD213PD" form="zmm, zmm, zmm {er}" xed="VFNMADD213PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <instruction name="VFNMADD231PD" form="zmm, zmm, zmm {er}" xed="VFNMADD231PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask3_fnmadd_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <parameter type="__m512d" varname="c" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <description>Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", add the negated intermediate result to packed elements in "c", and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := -(a[i+63:i] * b[i+63:i]) + c[i+63:i]
+ ELSE
+ dst[i+63:i] := c[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VFNMADD132PD" form="zmm {k}, zmm, zmm" xed="VFNMADD132PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <instruction name="VFNMADD213PD" form="zmm {k}, zmm, zmm" xed="VFNMADD213PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <instruction name="VFNMADD231PD" form="zmm {k}, zmm, zmm" xed="VFNMADD231PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask3_fnmadd_round_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <parameter type="__m512d" varname="c" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", add the negated intermediate result to packed elements in "c", and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set). [round_note]</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := -(a[i+63:i] * b[i+63:i]) + c[i+63:i]
+ ELSE
+ dst[i+63:i] := c[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VFNMADD132PD" form="zmm {k}, zmm, zmm {er}" xed="VFNMADD132PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <instruction name="VFNMADD213PD" form="zmm {k}, zmm, zmm {er}" xed="VFNMADD213PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <instruction name="VFNMADD231PD" form="zmm {k}, zmm, zmm {er}" xed="VFNMADD231PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_fnmadd_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <parameter type="__m512d" varname="c" etype="FP64"/>
+ <description>Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", add the negated intermediate result to packed elements in "c", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := -(a[i+63:i] * b[i+63:i]) + c[i+63:i]
+ ELSE
+ dst[i+63:i] := a[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VFNMADD132PD" form="zmm {k}, zmm, zmm" xed="VFNMADD132PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <instruction name="VFNMADD213PD" form="zmm {k}, zmm, zmm" xed="VFNMADD213PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <instruction name="VFNMADD231PD" form="zmm {k}, zmm, zmm" xed="VFNMADD231PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_fnmadd_round_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <parameter type="__m512d" varname="c" etype="FP64"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", add the negated intermediate result to packed elements in "c", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). [round_note]</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := -(a[i+63:i] * b[i+63:i]) + c[i+63:i]
+ ELSE
+ dst[i+63:i] := a[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VFNMADD132PD" form="zmm {k}, zmm, zmm {er}" xed="VFNMADD132PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <instruction name="VFNMADD213PD" form="zmm {k}, zmm, zmm {er}" xed="VFNMADD213PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <instruction name="VFNMADD231PD" form="zmm {k}, zmm, zmm {er}" xed="VFNMADD231PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_fnmadd_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <parameter type="__m512" varname="c" etype="FP32"/>
+ <description>Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", add the negated intermediate result to packed elements in "c", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := -(a[i+31:i] * b[i+31:i]) + c[i+31:i]
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VFNMADD132PS" form="zmm, zmm, zmm" xed="VFNMADD132PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <instruction name="VFNMADD213PS" form="zmm, zmm, zmm" xed="VFNMADD213PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <instruction name="VFNMADD231PS" form="zmm, zmm, zmm" xed="VFNMADD231PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_fnmadd_round_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <parameter type="__m512" varname="c" etype="FP32"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", add the negated intermediate result to packed elements in "c", and store the results in "dst".
+ [round_note]</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := -(a[i+31:i] * b[i+31:i]) + c[i+31:i]
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VFNMADD132PS" form="zmm, zmm, zmm {er}" xed="VFNMADD132PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <instruction name="VFNMADD213PS" form="zmm, zmm, zmm {er}" xed="VFNMADD213PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <instruction name="VFNMADD231PS" form="zmm, zmm, zmm {er}" xed="VFNMADD231PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask3_fnmadd_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <parameter type="__m512" varname="c" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <description>Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", add the negated intermediate result to packed elements in "c", and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := -(a[i+31:i] * b[i+31:i]) + c[i+31:i]
+ ELSE
+ dst[i+31:i] := c[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VFNMADD132PS" form="zmm {k}, zmm, zmm" xed="VFNMADD132PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <instruction name="VFNMADD213PS" form="zmm {k}, zmm, zmm" xed="VFNMADD213PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <instruction name="VFNMADD231PS" form="zmm {k}, zmm, zmm" xed="VFNMADD231PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask3_fnmadd_round_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <parameter type="__m512" varname="c" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", add the negated intermediate result to packed elements in "c", and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set). [round_note]</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := -(a[i+31:i] * b[i+31:i]) + c[i+31:i]
+ ELSE
+ dst[i+31:i] := c[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VFNMADD132PS" form="zmm {k}, zmm, zmm {er}" xed="VFNMADD132PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <instruction name="VFNMADD213PS" form="zmm {k}, zmm, zmm {er}" xed="VFNMADD213PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <instruction name="VFNMADD231PS" form="zmm {k}, zmm, zmm {er}" xed="VFNMADD231PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_fnmadd_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <parameter type="__m512" varname="c" etype="FP32"/>
+ <description>Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", add the negated intermediate result to packed elements in "c", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := -(a[i+31:i] * b[i+31:i]) + c[i+31:i]
+ ELSE
+ dst[i+31:i] := a[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VFNMADD132PS" form="zmm {k}, zmm, zmm" xed="VFNMADD132PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <instruction name="VFNMADD213PS" form="zmm {k}, zmm, zmm" xed="VFNMADD213PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <instruction name="VFNMADD231PS" form="zmm {k}, zmm, zmm" xed="VFNMADD231PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_fnmadd_round_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <parameter type="__m512" varname="c" etype="FP32"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", add the negated intermediate result to packed elements in "c", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set). [round_note]</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := -(a[i+31:i] * b[i+31:i]) + c[i+31:i]
+ ELSE
+ dst[i+31:i] := a[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VFNMADD132PS" form="zmm {k}, zmm, zmm {er}" xed="VFNMADD132PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <instruction name="VFNMADD213PS" form="zmm {k}, zmm, zmm {er}" xed="VFNMADD213PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <instruction name="VFNMADD231PS" form="zmm {k}, zmm, zmm {er}" xed="VFNMADD231PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_fnmsub_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <parameter type="__m512d" varname="c" etype="FP64"/>
+ <description>Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the negated intermediate result, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ dst[i+63:i] := -(a[i+63:i] * b[i+63:i]) - c[i+63:i]
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VFNMSUB132PD" form="zmm, zmm, zmm" xed="VFNMSUB132PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <instruction name="VFNMSUB213PD" form="zmm, zmm, zmm" xed="VFNMSUB213PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <instruction name="VFNMSUB231PD" form="zmm, zmm, zmm" xed="VFNMSUB231PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_fnmsub_round_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <parameter type="__m512d" varname="c" etype="FP64"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the negated intermediate result, and store the results in "dst".
+ [round_note]</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ dst[i+63:i] := -(a[i+63:i] * b[i+63:i]) - c[i+63:i]
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VFNMSUB132PD" form="zmm, zmm, zmm {er}" xed="VFNMSUB132PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <instruction name="VFNMSUB213PD" form="zmm, zmm, zmm {er}" xed="VFNMSUB213PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <instruction name="VFNMSUB231PD" form="zmm, zmm, zmm {er}" xed="VFNMSUB231PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask3_fnmsub_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <parameter type="__m512d" varname="c" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <description>Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the negated intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := -(a[i+63:i] * b[i+63:i]) - c[i+63:i]
+ ELSE
+ dst[i+63:i] := c[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VFNMSUB132PD" form="zmm {k}, zmm, zmm" xed="VFNMSUB132PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <instruction name="VFNMSUB213PD" form="zmm {k}, zmm, zmm" xed="VFNMSUB213PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <instruction name="VFNMSUB231PD" form="zmm {k}, zmm, zmm" xed="VFNMSUB231PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask3_fnmsub_round_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <parameter type="__m512d" varname="c" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the negated intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set). [round_note]</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := -(a[i+63:i] * b[i+63:i]) - c[i+63:i]
+ ELSE
+ dst[i+63:i] := c[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VFNMSUB132PD" form="zmm {k}, zmm, zmm {er}" xed="VFNMSUB132PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <instruction name="VFNMSUB213PD" form="zmm {k}, zmm, zmm {er}" xed="VFNMSUB213PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <instruction name="VFNMSUB231PD" form="zmm {k}, zmm, zmm {er}" xed="VFNMSUB231PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_fnmsub_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <parameter type="__m512d" varname="c" etype="FP64"/>
+ <description>Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the negated intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := -(a[i+63:i] * b[i+63:i]) - c[i+63:i]
+ ELSE
+ dst[i+63:i] := a[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VFNMSUB132PD" form="zmm {k}, zmm, zmm" xed="VFNMSUB132PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <instruction name="VFNMSUB213PD" form="zmm {k}, zmm, zmm" xed="VFNMSUB213PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <instruction name="VFNMSUB231PD" form="zmm {k}, zmm, zmm" xed="VFNMSUB231PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_fnmsub_round_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <parameter type="__m512d" varname="c" etype="FP64"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the negated intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).
+ [round_note]</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := -(a[i+63:i] * b[i+63:i]) - c[i+63:i]
+ ELSE
+ dst[i+63:i] := a[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VFNMSUB132PD" form="zmm {k}, zmm, zmm {er}" xed="VFNMSUB132PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <instruction name="VFNMSUB213PD" form="zmm {k}, zmm, zmm {er}" xed="VFNMSUB213PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <instruction name="VFNMSUB231PD" form="zmm {k}, zmm, zmm {er}" xed="VFNMSUB231PD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_fnmsub_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <parameter type="__m512" varname="c" etype="FP32"/>
+ <description>Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the negated intermediate result, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := -(a[i+31:i] * b[i+31:i]) - c[i+31:i]
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VFNMSUB132PS" form="zmm, zmm, zmm" xed="VFNMSUB132PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <instruction name="VFNMSUB213PS" form="zmm, zmm, zmm" xed="VFNMSUB213PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <instruction name="VFNMSUB231PS" form="zmm, zmm, zmm" xed="VFNMSUB231PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_fnmsub_round_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <parameter type="__m512" varname="c" etype="FP32"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the negated intermediate result, and store the results in "dst".
+ [round_note]</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := -(a[i+31:i] * b[i+31:i]) - c[i+31:i]
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VFNMSUB132PS" form="zmm, zmm, zmm {er}" xed="VFNMSUB132PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <instruction name="VFNMSUB213PS" form="zmm, zmm, zmm {er}" xed="VFNMSUB213PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <instruction name="VFNMSUB231PS" form="zmm, zmm, zmm {er}" xed="VFNMSUB231PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask3_fnmsub_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <parameter type="__m512" varname="c" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <description>Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the negated intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := -(a[i+31:i] * b[i+31:i]) - c[i+31:i]
+ ELSE
+ dst[i+31:i] := c[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VFNMSUB132PS" form="zmm {k}, zmm, zmm" xed="VFNMSUB132PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <instruction name="VFNMSUB213PS" form="zmm {k}, zmm, zmm" xed="VFNMSUB213PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <instruction name="VFNMSUB231PS" form="zmm {k}, zmm, zmm" xed="VFNMSUB231PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask3_fnmsub_round_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <parameter type="__m512" varname="c" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the negated intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set). [round_note]</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := -(a[i+31:i] * b[i+31:i]) - c[i+31:i]
+ ELSE
+ dst[i+31:i] := c[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VFNMSUB132PS" form="zmm {k}, zmm, zmm {er}" xed="VFNMSUB132PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <instruction name="VFNMSUB213PS" form="zmm {k}, zmm, zmm {er}" xed="VFNMSUB213PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <instruction name="VFNMSUB231PS" form="zmm {k}, zmm, zmm {er}" xed="VFNMSUB231PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_fnmsub_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <parameter type="__m512" varname="c" etype="FP32"/>
+ <description>Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the negated intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := -(a[i+31:i] * b[i+31:i]) - c[i+31:i]
+ ELSE
+ dst[i+31:i] := a[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VFNMSUB132PS" form="zmm {k}, zmm, zmm" xed="VFNMSUB132PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <instruction name="VFNMSUB213PS" form="zmm {k}, zmm, zmm" xed="VFNMSUB213PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <instruction name="VFNMSUB231PS" form="zmm {k}, zmm, zmm" xed="VFNMSUB231PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_fnmsub_round_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <parameter type="__m512" varname="c" etype="FP32"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the negated intermediate result, and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).
+ [round_note]</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := -(a[i+31:i] * b[i+31:i]) - c[i+31:i]
+ ELSE
+ dst[i+31:i] := a[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VFNMSUB132PS" form="zmm {k}, zmm, zmm {er}" xed="VFNMSUB132PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <instruction name="VFNMSUB213PS" form="zmm {k}, zmm, zmm {er}" xed="VFNMSUB213PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <instruction name="VFNMSUB231PS" form="zmm {k}, zmm, zmm {er}" xed="VFNMSUB231PS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_i32gather_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Load</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512i" varname="vindex" etype="SI32"/>
+ <parameter type="void const*" varname="base_addr" etype="FP32"/>
+ <parameter type="int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <description>Gather single-precision (32-bit) floating-point elements from memory using 32-bit indices. 32-bit elements are loaded from addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst". "scale" should be 1, 2, 4 or 8.</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ m := j*32
+ addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
+ dst[i+31:i] := MEM[addr+31:addr]
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VGATHERDPS" form="zmm, vm32z" xed="VGATHERDPS_ZMMf32_MASKmskw_MEMf32_AVX512_VL512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_i32gather_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Load</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="vindex" etype="SI32"/>
+ <parameter type="void const*" varname="base_addr" etype="FP32"/>
+ <parameter type="int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <description>Gather single-precision (32-bit) floating-point elements from memory using 32-bit indices. 32-bit elements are loaded from addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8.</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ m := j*32
+ IF k[j]
+ addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
+ dst[i+31:i] := MEM[addr+31:addr]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VGATHERDPS" form="zmm {k}, vm32z" xed="VGATHERDPS_ZMMf32_MASKmskw_MEMf32_AVX512_VL512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_getexp_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Convert the exponent of each packed double-precision (64-bit) floating-point element in "a" to a double-precision (64-bit) floating-point number representing the integer exponent, and store the results in "dst". This intrinsic essentially calculates "floor(log2(x))" for each element.</description>
+ <operation>FOR j := 0 to 7
+ i := j*64
+ dst[i+63:i] := ConvertExpFP64(a[i+63:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VGETEXPPD" form="zmm, zmm" xed="VGETEXPPD_ZMMf64_MASKmskw_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_getexp_round_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Convert the exponent of each packed double-precision (64-bit) floating-point element in "a" to a double-precision (64-bit) floating-point number representing the integer exponent, and store the results in "dst". This intrinsic essentially calculates "floor(log2(x))" for each element.
+ [sae_note]</description>
+ <operation>FOR j := 0 to 7
+ i := j*64
+ dst[i+63:i] := ConvertExpFP64(a[i+63:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VGETEXPPD" form="zmm, zmm {sae}" xed="VGETEXPPD_ZMMf64_MASKmskw_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_getexp_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Convert the exponent of each packed double-precision (64-bit) floating-point element in "a" to a double-precision (64-bit) floating-point number representing the integer exponent, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). This intrinsic essentially calculates "floor(log2(x))" for each element.</description>
+ <operation>FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := ConvertExpFP64(a[i+63:i])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VGETEXPPD" form="zmm {k}, zmm" xed="VGETEXPPD_ZMMf64_MASKmskw_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_getexp_round_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Convert the exponent of each packed double-precision (64-bit) floating-point element in "a" to a double-precision (64-bit) floating-point number representing the integer exponent, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). This intrinsic essentially calculates "floor(log2(x))" for each element.
+ [sae_note]</description>
+ <operation>FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := ConvertExpFP64(a[i+63:i])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VGETEXPPD" form="zmm {k}, zmm {sae}" xed="VGETEXPPD_ZMMf64_MASKmskw_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_getexp_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Convert the exponent of each packed single-precision (32-bit) floating-point element in "a" to a single-precision (32-bit) floating-point number representing the integer exponent, and store the results in "dst". This intrinsic essentially calculates "floor(log2(x))" for each element.</description>
+ <operation>FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := ConvertExpFP32(a[i+31:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VGETEXPPS" form="zmm, zmm" xed="VGETEXPPS_ZMMf32_MASKmskw_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_getexp_round_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Convert the exponent of each packed single-precision (32-bit) floating-point element in "a" to a single-precision (32-bit) floating-point number representing the integer exponent, and store the results in "dst". This intrinsic essentially calculates "floor(log2(x))" for each element.
+ [sae_note]</description>
+ <operation>FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := ConvertExpFP32(a[i+31:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VGETEXPPS" form="zmm, zmm {sae}" xed="VGETEXPPS_ZMMf32_MASKmskw_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_getexp_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Convert the exponent of each packed single-precision (32-bit) floating-point element in "a" to a single-precision (32-bit) floating-point number representing the integer exponent, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). This intrinsic essentially calculates "floor(log2(x))" for each element.</description>
+ <operation>FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := ConvertExpFP32(a[i+31:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VGETEXPPS" form="zmm {k}, zmm" xed="VGETEXPPS_ZMMf32_MASKmskw_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_getexp_round_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Convert the exponent of each packed single-precision (32-bit) floating-point element in "a" to a single-precision (32-bit) floating-point number representing the integer exponent, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). This intrinsic essentially calculates "floor(log2(x))" for each element.
+ [sae_note]</description>
+ <operation>FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := ConvertExpFP32(a[i+31:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VGETEXPPS" form="zmm {k}, zmm {sae}" xed="VGETEXPPS_ZMMf32_MASKmskw_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_getmant_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="_MM_MANTISSA_NORM_ENUM" varname="interv" etype="IMM" immtype="_MM_MANTISSA_NORM"/>
+ <parameter type="_MM_MANTISSA_SIGN_ENUM" varname="sc" etype="IMM" immtype="_MM_MANTISSA_SIGN"/>
+ <description>Normalize the mantissas of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst". This intrinsic essentially calculates "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "interv" and the sign depends on "sc" and the source sign.
+ [getmant_note]</description>
+ <operation>FOR j := 0 to 7
+ i := j*64
+ dst[i+63:i] := GetNormalizedMantissa(a[i+63:i], sc, interv)
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VGETMANTPD" form="zmm, zmm, imm8" xed="VGETMANTPD_ZMMf64_MASKmskw_ZMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_getmant_round_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="_MM_MANTISSA_NORM_ENUM" varname="interv" etype="IMM" immtype="_MM_MANTISSA_NORM"/>
+ <parameter type="_MM_MANTISSA_SIGN_ENUM" varname="sc" etype="IMM" immtype="_MM_MANTISSA_SIGN"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Normalize the mantissas of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst". This intrinsic essentially calculates "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "interv" and the sign depends on "sc" and the source sign.
+ [getmant_note][sae_note]</description>
+ <operation>FOR j := 0 to 7
+ i := j*64
+ dst[i+63:i] := GetNormalizedMantissa(a[i+63:i], sc, interv)
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VGETMANTPD" form="zmm, zmm, imm8 {sae}" xed="VGETMANTPD_ZMMf64_MASKmskw_ZMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_getmant_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="_MM_MANTISSA_NORM_ENUM" varname="interv" etype="IMM" immtype="_MM_MANTISSA_NORM"/>
+ <parameter type="_MM_MANTISSA_SIGN_ENUM" varname="sc" etype="IMM" immtype="_MM_MANTISSA_SIGN"/>
+ <description>Normalize the mantissas of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). This intrinsic essentially calculates "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "interv" and the sign depends on "sc" and the source sign.
+ [getmant_note]</description>
+ <operation>FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := GetNormalizedMantissa(a[i+63:i], sc, interv)
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VGETMANTPD" form="zmm {k}, zmm, imm8" xed="VGETMANTPD_ZMMf64_MASKmskw_ZMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_getmant_round_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="_MM_MANTISSA_NORM_ENUM" varname="interv" etype="IMM" immtype="_MM_MANTISSA_NORM"/>
+ <parameter type="_MM_MANTISSA_SIGN_ENUM" varname="sc" etype="IMM" immtype="_MM_MANTISSA_SIGN"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Normalize the mantissas of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). This intrinsic essentially calculates "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "interv" and the sign depends on "sc" and the source sign.
+ [getmant_note][sae_note]</description>
+ <operation>FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := GetNormalizedMantissa(a[i+63:i], sc, interv)
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VGETMANTPD" form="zmm {k}, zmm, imm8 {sae}" xed="VGETMANTPD_ZMMf64_MASKmskw_ZMMf64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_getmant_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="_MM_MANTISSA_NORM_ENUM" varname="interv" etype="IMM" immtype="_MM_MANTISSA_NORM"/>
+ <parameter type="_MM_MANTISSA_SIGN_ENUM" varname="sc" etype="IMM" immtype="_MM_MANTISSA_SIGN"/>
+ <description>Normalize the mantissas of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". This intrinsic essentially calculates "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "interv" and the sign depends on "sc" and the source sign.
+ [getmant_note]</description>
+ <operation>FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := GetNormalizedMantissa(a[i+31:i], sc, interv)
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VGETMANTPS" form="zmm, zmm, imm8" xed="VGETMANTPS_ZMMf32_MASKmskw_ZMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_getmant_round_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="_MM_MANTISSA_NORM_ENUM" varname="interv" etype="IMM" immtype="_MM_MANTISSA_NORM"/>
+ <parameter type="_MM_MANTISSA_SIGN_ENUM" varname="sc" etype="IMM" immtype="_MM_MANTISSA_SIGN"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Normalize the mantissas of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". This intrinsic essentially calculates "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "interv" and the sign depends on "sc" and the source sign.
+ [getmant_note][sae_note]</description>
+ <operation>FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := GetNormalizedMantissa(a[i+31:i], sc, interv)
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VGETMANTPS" form="zmm, zmm, imm8 {sae}" xed="VGETMANTPS_ZMMf32_MASKmskw_ZMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_getmant_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="_MM_MANTISSA_NORM_ENUM" varname="interv" etype="IMM" immtype="_MM_MANTISSA_NORM"/>
+ <parameter type="_MM_MANTISSA_SIGN_ENUM" varname="sc" etype="IMM" immtype="_MM_MANTISSA_SIGN"/>
+ <description>Normalize the mantissas of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). This intrinsic essentially calculates "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "interv" and the sign depends on "sc" and the source sign.
+ [getmant_note]</description>
+ <operation>FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := GetNormalizedMantissa(a[i+31:i], sc, interv)
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VGETMANTPS" form="zmm {k}, zmm, imm8" xed="VGETMANTPS_ZMMf32_MASKmskw_ZMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_getmant_round_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="_MM_MANTISSA_NORM_ENUM" varname="interv" etype="IMM" immtype="_MM_MANTISSA_NORM"/>
+ <parameter type="_MM_MANTISSA_SIGN_ENUM" varname="sc" etype="IMM" immtype="_MM_MANTISSA_SIGN"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Normalize the mantissas of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). This intrinsic essentially calculates "±(2^k)*|x.significand|", where "k" depends on the interval range defined by "interv" and the sign depends on "sc" and the source sign.
+ [getmant_note][sae_note]</description>
+ <operation>FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := GetNormalizedMantissa(a[i+31:i], sc, interv)
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VGETMANTPS" form="zmm {k}, zmm, imm8 {sae}" xed="VGETMANTPS_ZMMf32_MASKmskw_ZMMf32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_load_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Load</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="void const*" varname="mem_addr" etype="FP64" memwidth="512"/>
+ <description>Load 512-bits (composed of 8 packed double-precision (64-bit) floating-point elements) from memory into "dst".
+ "mem_addr" must be aligned on a 64-byte boundary or a general-protection exception may be generated.</description>
+ <operation>
+dst[511:0] := MEM[mem_addr+511:mem_addr]
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VMOVAPD" form="zmm, m512" xed="VMOVAPD_ZMMf64_MASKmskw_MEMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_load_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Load</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="void const*" varname="mem_addr" etype="FP64" memwidth="512"/>
+ <description>Load packed double-precision (64-bit) floating-point elements from memory into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). "mem_addr" must be aligned on a 64-byte boundary or a general-protection exception may be generated.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := MEM[mem_addr+i+63:mem_addr+i]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VMOVAPD" form="zmm {k}, m512" xed="VMOVAPD_ZMMf64_MASKmskw_MEMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_mov_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Move</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Move packed double-precision (64-bit) floating-point elements from "a" to "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := a[i+63:i]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VMOVAPD" form="zmm {k}, zmm" xed="VMOVAPD_ZMMf64_MASKmskw_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_store_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="mem_addr" etype="FP64" memwidth="512"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Store packed double-precision (64-bit) floating-point elements from "a" into memory using writemask "k".
+ "mem_addr" must be aligned on a 64-byte boundary or a general-protection exception may be generated.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ MEM[mem_addr+i+63:mem_addr+i] := a[i+63:i]
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VMOVAPD" form="m512 {k}, zmm" xed="VMOVAPD_MEMf64_MASKmskw_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_store_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="mem_addr" etype="FP64" memwidth="512"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Store 512-bits (composed of 8 packed double-precision (64-bit) floating-point elements) from "a" into memory.
+ "mem_addr" must be aligned on a 64-byte boundary or a general-protection exception may be generated.</description>
+ <operation>
+MEM[mem_addr+511:mem_addr] := a[511:0]
+ </operation>
+ <instruction name="VMOVAPD" form="m512, zmm" xed="VMOVAPD_MEMf64_MASKmskw_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_load_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Load</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="void const*" varname="mem_addr" etype="FP32" memwidth="512"/>
+ <description>Load 512-bits (composed of 16 packed single-precision (32-bit) floating-point elements) from memory into "dst".
+ "mem_addr" must be aligned on a 64-byte boundary or a general-protection exception may be generated.</description>
+ <operation>
+dst[511:0] := MEM[mem_addr+511:mem_addr]
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VMOVAPS" form="zmm, m512" xed="VMOVAPS_ZMMf32_MASKmskw_MEMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_load_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Load</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="void const*" varname="mem_addr" etype="FP32" memwidth="512"/>
+ <description>Load packed single-precision (32-bit) floating-point elements from memory into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). "mem_addr" must be aligned on a 64-byte boundary or a general-protection exception may be generated.</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := MEM[mem_addr+i+31:mem_addr+i]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VMOVAPS" form="zmm {k}, m512" xed="VMOVAPS_ZMMf32_MASKmskw_MEMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_mov_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Move</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Move packed single-precision (32-bit) floating-point elements from "a" to "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := a[i+31:i]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VMOVAPS" form="zmm {k}, zmm" xed="VMOVAPS_ZMMf32_MASKmskw_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_store_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="mem_addr" etype="FP32" memwidth="512"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Store packed single-precision (32-bit) floating-point elements from "a" into memory using writemask "k".
+ "mem_addr" must be aligned on a 64-byte boundary or a general-protection exception may be generated.</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ MEM[mem_addr+i+31:mem_addr+i] := a[i+31:i]
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VMOVAPS" form="m512 {k}, zmm" xed="VMOVAPS_MEMf32_MASKmskw_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_store_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="mem_addr" etype="FP32" memwidth="512"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Store 512-bits (composed of 16 packed single-precision (32-bit) floating-point elements) from "a" into memory.
+ "mem_addr" must be aligned on a 64-byte boundary or a general-protection exception may be generated.</description>
+ <operation>
+MEM[mem_addr+511:mem_addr] := a[511:0]
+ </operation>
+ <instruction name="VMOVAPS" form="m512, zmm" xed="VMOVAPS_MEMf32_MASKmskw_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_load_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Load</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="void const*" varname="mem_addr" etype="UI32" memwidth="512"/>
+ <description>Load 512-bits (composed of 16 packed 32-bit integers) from memory into "dst".
+ "mem_addr" must be aligned on a 64-byte boundary or a general-protection exception may be generated.</description>
+ <operation>
+dst[511:0] := MEM[mem_addr+511:mem_addr]
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VMOVDQA32" form="zmm, m512" xed="VMOVDQA32_ZMMu32_MASKmskw_MEMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_load_si512">
+ <type>Integer</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Load</category>
+ <return type="__m512i" varname="dst" etype="M512"/>
+ <parameter type="void const*" varname="mem_addr" etype="M512" memwidth="512"/>
+ <description>Load 512-bits of integer data from memory into "dst".
+ "mem_addr" must be aligned on a 64-byte boundary or a general-protection exception may be generated.</description>
+ <operation>
+dst[511:0] := MEM[mem_addr+511:mem_addr]
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VMOVDQA32" form="zmm, m512" xed="VMOVDQA32_ZMMu32_MASKmskw_MEMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_load_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Load</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="src" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="void const*" varname="mem_addr" etype="UI32" memwidth="512"/>
+ <description>Load packed 32-bit integers from memory into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+ "mem_addr" must be aligned on a 64-byte boundary or a general-protection exception may be generated.</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := MEM[mem_addr+i+31:mem_addr+i]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VMOVDQA32" form="zmm {k}, m512" xed="VMOVDQA32_ZMMu32_MASKmskw_MEMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_mov_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Move</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="src" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <description>Move packed 32-bit integers from "a" to "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := a[i+31:i]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VMOVDQA32" form="zmm {k}, zmm" xed="VMOVDQA32_ZMMu32_MASKmskw_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_store_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="mem_addr" etype="UI32" memwidth="512"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <description>Store packed 32-bit integers from "a" into memory using writemask "k".
+ "mem_addr" must be aligned on a 64-byte boundary or a general-protection exception may be generated.</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ MEM[mem_addr+i+31:mem_addr+i] := a[i+31:i]
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VMOVDQA32" form="m512 {k}, zmm" xed="VMOVDQA32_MEMu32_MASKmskw_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_store_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="mem_addr" etype="UI32" memwidth="512"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <description>Store 512-bits (composed of 16 packed 32-bit integers) from "a" into memory.
+ "mem_addr" must be aligned on a 64-byte boundary or a general-protection exception may be generated.</description>
+ <operation>
+MEM[mem_addr+511:mem_addr] := a[511:0]
+ </operation>
+ <instruction name="VMOVDQA32" form="m512, zmm" xed="VMOVDQA32_MEMu32_MASKmskw_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_store_si512">
+ <type>Integer</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="mem_addr" etype="M512" memwidth="512"/>
+ <parameter type="__m512i" varname="a" etype="M512"/>
+ <description>Store 512-bits of integer data from "a" into memory.
+ "mem_addr" must be aligned on a 64-byte boundary or a general-protection exception may be generated.</description>
+ <operation>
+MEM[mem_addr+511:mem_addr] := a[511:0]
+ </operation>
+ <instruction name="VMOVDQA32" form="m512, zmm" xed="VMOVDQA32_MEMu32_MASKmskw_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_load_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Load</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="void const*" varname="mem_addr" etype="UI64" memwidth="512"/>
+ <description>Load 512-bits (composed of 8 packed 64-bit integers) from memory into "dst".
+ "mem_addr" must be aligned on a 64-byte boundary or a general-protection exception may be generated.</description>
+ <operation>
+dst[511:0] := MEM[mem_addr+511:mem_addr]
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VMOVDQA64" form="zmm, m512" xed="VMOVDQA64_ZMMu64_MASKmskw_MEMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_load_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Load</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="void const*" varname="mem_addr" etype="UI64" memwidth="512"/>
+ <description>Load packed 64-bit integers from memory into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+ "mem_addr" must be aligned on a 64-byte boundary or a general-protection exception may be generated.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := MEM[mem_addr+i+63:mem_addr+i]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VMOVDQA64" form="zmm {k}, m512" xed="VMOVDQA64_ZMMu64_MASKmskw_MEMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_mov_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Move</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <description>Move packed 64-bit integers from "a" to "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := a[i+63:i]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VMOVDQA64" form="zmm {k}, zmm" xed="VMOVDQA64_ZMMu64_MASKmskw_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_store_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="mem_addr" etype="UI64" memwidth="512"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <description>Store packed 64-bit integers from "a" into memory using writemask "k".
+ "mem_addr" must be aligned on a 64-byte boundary or a general-protection exception may be generated.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ MEM[mem_addr+i+63:mem_addr+i] := a[i+63:i]
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VMOVDQA64" form="m512 {k}, zmm" xed="VMOVDQA64_MEMu64_MASKmskw_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_store_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="mem_addr" etype="UI64" memwidth="512"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <description>Store 512-bits (composed of 8 packed 64-bit integers) from "a" into memory.
+ "mem_addr" must be aligned on a 64-byte boundary or a general-protection exception may be generated.</description>
+ <operation>
+MEM[mem_addr+511:mem_addr] := a[511:0]
+ </operation>
+ <instruction name="VMOVDQA64" form="m512, zmm" xed="VMOVDQA64_MEMu64_MASKmskw_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_mul_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <description>Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). RM.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := a[i+63:i] * b[i+63:i]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VMULPD" form="zmm {k}, zmm, zmm" xed="VMULPD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_mul_round_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+ [round_note]</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := a[i+63:i] * b[i+63:i]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VMULPD" form="zmm {k}, zmm, zmm {er}" xed="VMULPD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mul_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <description>Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ dst[i+63:i] := a[i+63:i] * b[i+63:i]
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VMULPD" form="zmm, zmm, zmm" xed="VMULPD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mul_round_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst".
+ [round_note]</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ dst[i+63:i] := a[i+63:i] * b[i+63:i]
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VMULPD" form="zmm, zmm, zmm {er}" xed="VMULPD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_mul_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <description>Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := a[i+31:i] * b[i+31:i]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VMULPS" form="zmm {k}, zmm, zmm" xed="VMULPS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_mul_round_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+ [round_note]</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := a[i+31:i] * b[i+31:i]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VMULPS" form="zmm {k}, zmm, zmm {er}" xed="VMULPS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mul_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <description>Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := a[i+31:i] * b[i+31:i]
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VMULPS" form="zmm, zmm, zmm" xed="VMULPS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mul_round_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst".
+ [round_note]</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := a[i+31:i] * b[i+31:i]
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VMULPS" form="zmm, zmm, zmm {er}" xed="VMULPS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_add_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="__m512i" varname="b" etype="UI32"/>
+ <description>Add packed 32-bit integers in "a" and "b", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := a[i+31:i] + b[i+31:i]
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPADDD" form="zmm, zmm, zmm" xed="VPADDD_ZMMu32_MASKmskw_ZMMu32_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_add_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="src" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="__m512i" varname="b" etype="UI32"/>
+ <description>Add packed 32-bit integers in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := a[i+31:i] + b[i+31:i]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPADDD" form="zmm {k}, zmm, zmm" xed="VPADDD_ZMMu32_MASKmskw_ZMMu32_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_and_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Logical</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="__m512i" varname="b" etype="UI32"/>
+ <description>Compute the bitwise AND of packed 32-bit integers in "a" and "b", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := a[i+31:i] AND b[i+31:i]
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPANDD" form="zmm, zmm, zmm" xed="VPANDD_ZMMu32_MASKmskw_ZMMu32_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_and_si512">
+ <type>Integer</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Logical</category>
+ <return type="__m512i" varname="dst" etype="M512"/>
+ <parameter type="__m512i" varname="a" etype="M512"/>
+ <parameter type="__m512i" varname="b" etype="M512"/>
+ <description>Compute the bitwise AND of 512 bits (representing integer data) in "a" and "b", and store the result in "dst".</description>
+ <operation>
+dst[511:0] := (a[511:0] AND b[511:0])
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPANDD" form="zmm, zmm, zmm" xed="VPANDD_ZMMu32_MASKmskw_ZMMu32_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_andnot_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Logical</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="__m512i" varname="b" etype="UI32"/>
+ <description>Compute the bitwise NOT of packed 32-bit integers in "a" and then AND with "b", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := (NOT a[i+31:i]) AND b[i+31:i]
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPANDND" form="zmm, zmm, zmm" xed="VPANDND_ZMMu32_MASKmskw_ZMMu32_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_andnot_si512">
+ <type>Integer</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Logical</category>
+ <return type="__m512i" varname="dst" etype="M512"/>
+ <parameter type="__m512i" varname="a" etype="M512"/>
+ <parameter type="__m512i" varname="b" etype="M512"/>
+ <description>Compute the bitwise NOT of 512 bits (representing integer data) in "a" and then AND with "b", and store the result in "dst".</description>
+ <operation>
+dst[511:0] := ((NOT a[511:0]) AND b[511:0])
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPANDND" form="zmm, zmm, zmm" xed="VPANDND_ZMMu32_MASKmskw_ZMMu32_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_andnot_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Logical</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="src" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="__m512i" varname="b" etype="UI32"/>
+ <description>Compute the bitwise NOT of packed 32-bit integers in "a" and then AND with "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := ((NOT a[i+31:i]) AND b[i+31:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPANDND" form="zmm {k}, zmm, zmm" xed="VPANDND_ZMMu32_MASKmskw_ZMMu32_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_andnot_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Logical</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="__m512i" varname="b" etype="UI64"/>
+ <description>Compute the bitwise NOT of 512 bits (composed of packed 64-bit integers) in "a" and then AND with "b", and store the results in "dst".</description>
+ <operation>
+dst[511:0] := ((NOT a[511:0]) AND b[511:0])
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPANDNQ" form="zmm, zmm, zmm" xed="VPANDNQ_ZMMu64_MASKmskw_ZMMu64_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_andnot_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Logical</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="__m512i" varname="b" etype="UI64"/>
+ <description>Compute the bitwise NOT of packed 64-bit integers in "a" and then AND with "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := ((NOT a[i+63:i]) AND b[i+63:i])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPANDNQ" form="zmm {k}, zmm, zmm" xed="VPANDNQ_ZMMu64_MASKmskw_ZMMu64_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_and_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Logical</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="__m512i" varname="b" etype="UI64"/>
+ <description>Compute the bitwise AND of 512 bits (composed of packed 64-bit integers) in "a" and "b", and store the results in "dst".</description>
+ <operation>
+dst[511:0] := (a[511:0] AND b[511:0])
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPANDQ" form="zmm, zmm, zmm" xed="VPANDQ_ZMMu64_MASKmskw_ZMMu64_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_and_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Logical</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="__m512i" varname="b" etype="UI64"/>
+ <description>Compute the bitwise AND of packed 64-bit integers in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := a[i+63:i] AND b[i+63:i]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPANDQ" form="zmm {k}, zmm, zmm" xed="VPANDQ_ZMMu64_MASKmskw_ZMMu64_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_blend_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="__m512i" varname="b" etype="UI32"/>
+ <description>Blend packed 32-bit integers from "a" and "b" using control mask "k", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := b[i+31:i]
+ ELSE
+ dst[i+31:i] := a[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPBLENDMD" form="zmm {k}, zmm, zmm" xed="VPBLENDMD_ZMMu32_MASKmskw_ZMMu32_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_blend_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="__m512i" varname="b" etype="UI64"/>
+ <description>Blend packed 64-bit integers from "a" and "b" using control mask "k", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := b[i+63:i]
+ ELSE
+ dst[i+63:i] := a[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPBLENDMQ" form="zmm {k}, zmm, zmm" xed="VPBLENDMQ_ZMMu64_MASKmskw_ZMMu64_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_cmp_epi32_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Compare</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI32"/>
+ <parameter type="__m512i" varname="b" etype="SI32"/>
+ <parameter type="_MM_CMPINT_ENUM" varname="imm8" etype="IMM" immtype="_MM_CMPINT"/>
+ <description>Compare packed signed 32-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k".</description>
+ <operation>CASE (imm8[2:0]) OF
+0: OP := _MM_CMPINT_EQ
+1: OP := _MM_CMPINT_LT
+2: OP := _MM_CMPINT_LE
+3: OP := _MM_CMPINT_FALSE
+4: OP := _MM_CMPINT_NE
+5: OP := _MM_CMPINT_NLT
+6: OP := _MM_CMPINT_NLE
+7: OP := _MM_CMPINT_TRUE
+ESAC
+FOR j := 0 to 15
+ i := j*32
+ k[j] := ( a[i+31:i] OP b[i+31:i] ) ? 1 : 0
+ENDFOR
+k[MAX:16] := 0
+ </operation>
+ <instruction name="VPCMPD" form="k, zmm, zmm, imm8" xed="VPCMPD_MASKmskw_MASKmskw_ZMMi32_ZMMi32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_cmpeq_epi32_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Compare</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="__m512i" varname="b" etype="UI32"/>
+ <description>Compare packed 32-bit integers in "a" and "b" for equality, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ k[j] := ( a[i+31:i] == b[i+31:i] ) ? 1 : 0
+ENDFOR
+k[MAX:16] := 0
+ </operation>
+ <instruction name="VPCMPEQD" form="k, zmm, zmm" xed="VPCMPEQD_MASKmskw_MASKmskw_ZMMu32_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_cmpge_epi32_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Compare</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI32"/>
+ <parameter type="__m512i" varname="b" etype="SI32"/>
+ <description>Compare packed signed 32-bit integers in "a" and "b" for greater-than-or-equal, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ k[j] := ( a[i+31:i] &gt;= b[i+31:i] ) ? 1 : 0
+ENDFOR
+k[MAX:16] := 0
+ </operation>
+ <instruction name="VPCMPD" form="k, zmm, zmm, imm8" xed="VPCMPD_MASKmskw_MASKmskw_ZMMi32_ZMMi32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_cmpgt_epi32_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Compare</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI32"/>
+ <parameter type="__m512i" varname="b" etype="SI32"/>
+ <description>Compare packed signed 32-bit integers in "a" and "b" for greater-than, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ k[j] := ( a[i+31:i] &gt; b[i+31:i] ) ? 1 : 0
+ENDFOR
+k[MAX:16] := 0
+ </operation>
+ <instruction name="VPCMPGTD" form="k, zmm, zmm" xed="VPCMPGTD_MASKmskw_MASKmskw_ZMMi32_ZMMi32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_cmple_epi32_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Compare</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI32"/>
+ <parameter type="__m512i" varname="b" etype="SI32"/>
+ <description>Compare packed signed 32-bit integers in "a" and "b" for less-than-or-equal, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ k[j] := ( a[i+31:i] &lt;= b[i+31:i] ) ? 1 : 0
+ENDFOR
+k[MAX:16] := 0
+ </operation>
+ <instruction name="VPCMPD" form="k, zmm, zmm, imm8" xed="VPCMPD_MASKmskw_MASKmskw_ZMMi32_ZMMi32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_cmpneq_epi32_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Compare</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="__m512i" varname="b" etype="UI32"/>
+ <description>Compare packed 32-bit integers in "a" and "b" for not-equal, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ k[j] := ( a[i+31:i] != b[i+31:i] ) ? 1 : 0
+ENDFOR
+k[MAX:16] := 0
+ </operation>
+ <instruction name="VPCMPD" form="k, zmm, zmm, imm8" xed="VPCMPD_MASKmskw_MASKmskw_ZMMi32_ZMMi32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_cmp_epi32_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Compare</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__mmask16" varname="k1" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI32"/>
+ <parameter type="__m512i" varname="b" etype="SI32"/>
+ <parameter type="_MM_CMPINT_ENUM" varname="imm8" etype="IMM" immtype="_MM_CMPINT"/>
+ <description>Compare packed signed 32-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>CASE (imm8[2:0]) OF
+0: OP := _MM_CMPINT_EQ
+1: OP := _MM_CMPINT_LT
+2: OP := _MM_CMPINT_LE
+3: OP := _MM_CMPINT_FALSE
+4: OP := _MM_CMPINT_NE
+5: OP := _MM_CMPINT_NLT
+6: OP := _MM_CMPINT_NLE
+7: OP := _MM_CMPINT_TRUE
+ESAC
+FOR j := 0 to 15
+ i := j*32
+ IF k1[j]
+ k[j] := ( a[i+31:i] OP b[i+31:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:16] := 0
+ </operation>
+ <instruction name="VPCMPD" form="k {k}, zmm, zmm, imm8" xed="VPCMPD_MASKmskw_MASKmskw_ZMMi32_ZMMi32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_cmpeq_epi32_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Compare</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__mmask16" varname="k1" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="__m512i" varname="b" etype="UI32"/>
+ <description>Compare packed 32-bit integers in "a" and "b" for equality, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k1[j]
+ k[j] := ( a[i+31:i] == b[i+31:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:16] := 0
+ </operation>
+ <instruction name="VPCMPEQD" form="k {k}, zmm, zmm" xed="VPCMPEQD_MASKmskw_MASKmskw_ZMMu32_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_cmpge_epi32_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Compare</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__mmask16" varname="k1" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI32"/>
+ <parameter type="__m512i" varname="b" etype="SI32"/>
+ <description>Compare packed signed 32-bit integers in "a" and "b" for greater-than-or-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k1[j]
+ k[j] := ( a[i+31:i] &gt;= b[i+31:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:16] := 0
+ </operation>
+ <instruction name="VPCMPD" form="k {k}, zmm, zmm, imm8" xed="VPCMPD_MASKmskw_MASKmskw_ZMMi32_ZMMi32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_cmpgt_epi32_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Compare</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__mmask16" varname="k1" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI32"/>
+ <parameter type="__m512i" varname="b" etype="SI32"/>
+ <description>Compare packed signed 32-bit integers in "a" and "b" for greater-than, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k1[j]
+ k[j] := ( a[i+31:i] &gt; b[i+31:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:16] := 0
+ </operation>
+ <instruction name="VPCMPGTD" form="k {k}, zmm, zmm" xed="VPCMPGTD_MASKmskw_MASKmskw_ZMMi32_ZMMi32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_cmple_epi32_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Compare</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__mmask16" varname="k1" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI32"/>
+ <parameter type="__m512i" varname="b" etype="SI32"/>
+ <description>Compare packed signed 32-bit integers in "a" and "b" for less-than-or-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k1[j]
+ k[j] := ( a[i+31:i] &lt;= b[i+31:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:16] := 0
+ </operation>
+ <instruction name="VPCMPD" form="k {k}, zmm, zmm, imm8" xed="VPCMPD_MASKmskw_MASKmskw_ZMMi32_ZMMi32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_cmpneq_epi32_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Compare</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__mmask16" varname="k1" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="__m512i" varname="b" etype="UI32"/>
+ <description>Compare packed 32-bit integers in "a" and "b" for not-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k1[j]
+ k[j] := ( a[i+31:i] != b[i+31:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:16] := 0
+ </operation>
+ <instruction name="VPCMPD" form="k {k}, zmm, zmm, imm8" xed="VPCMPD_MASKmskw_MASKmskw_ZMMi32_ZMMi32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_cmp_epu32_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Compare</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="__m512i" varname="b" etype="UI32"/>
+ <parameter type="_MM_CMPINT_ENUM" varname="imm8" etype="IMM" immtype="_MM_CMPINT"/>
+ <description>Compare packed unsigned 32-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k".</description>
+ <operation>CASE (imm8[2:0]) OF
+0: OP := _MM_CMPINT_EQ
+1: OP := _MM_CMPINT_LT
+2: OP := _MM_CMPINT_LE
+3: OP := _MM_CMPINT_FALSE
+4: OP := _MM_CMPINT_NE
+5: OP := _MM_CMPINT_NLT
+6: OP := _MM_CMPINT_NLE
+7: OP := _MM_CMPINT_TRUE
+ESAC
+FOR j := 0 to 15
+ i := j*32
+ k[j] := ( a[i+31:i] OP b[i+31:i] ) ? 1 : 0
+ENDFOR
+k[MAX:16] := 0
+ </operation>
+ <instruction name="VPCMPUD" form="k, zmm, zmm, imm8" xed="VPCMPUD_MASKmskw_MASKmskw_ZMMu32_ZMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_cmpeq_epu32_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Compare</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="__m512i" varname="b" etype="UI32"/>
+ <description>Compare packed unsigned 32-bit integers in "a" and "b" for equality, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ k[j] := ( a[i+31:i] == b[i+31:i] ) ? 1 : 0
+ENDFOR
+k[MAX:16] := 0
+ </operation>
+ <instruction name="VPCMPUD" form="k, zmm, zmm, imm8" xed="VPCMPUD_MASKmskw_MASKmskw_ZMMu32_ZMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_cmpge_epu32_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Compare</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="__m512i" varname="b" etype="UI32"/>
+ <description>Compare packed unsigned 32-bit integers in "a" and "b" for greater-than-or-equal, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ k[j] := ( a[i+31:i] &gt;= b[i+31:i] ) ? 1 : 0
+ENDFOR
+k[MAX:16] := 0
+ </operation>
+ <instruction name="VPCMPUD" form="k, zmm, zmm, imm8" xed="VPCMPUD_MASKmskw_MASKmskw_ZMMu32_ZMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_cmpgt_epu32_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Compare</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="__m512i" varname="b" etype="UI32"/>
+ <description>Compare packed unsigned 32-bit integers in "a" and "b" for greater-than, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ k[j] := ( a[i+31:i] &gt; b[i+31:i] ) ? 1 : 0
+ENDFOR
+k[MAX:16] := 0
+ </operation>
+ <instruction name="VPCMPUD" form="k, zmm, zmm, imm8" xed="VPCMPUD_MASKmskw_MASKmskw_ZMMu32_ZMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_cmple_epu32_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Compare</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="__m512i" varname="b" etype="UI32"/>
+ <description>Compare packed unsigned 32-bit integers in "a" and "b" for less-than-or-equal, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ k[j] := ( a[i+31:i] &lt;= b[i+31:i] ) ? 1 : 0
+ENDFOR
+k[MAX:16] := 0
+ </operation>
+ <instruction name="VPCMPUD" form="k, zmm, zmm, imm8" xed="VPCMPUD_MASKmskw_MASKmskw_ZMMu32_ZMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_cmplt_epu32_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Compare</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="__m512i" varname="b" etype="UI32"/>
+ <description>Compare packed unsigned 32-bit integers in "a" and "b" for less-than, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ k[j] := ( a[i+31:i] &lt; b[i+31:i] ) ? 1 : 0
+ENDFOR
+k[MAX:16] := 0
+ </operation>
+ <instruction name="VPCMPUD" form="k, zmm, zmm, imm8" xed="VPCMPUD_MASKmskw_MASKmskw_ZMMu32_ZMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_cmpneq_epu32_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Compare</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="__m512i" varname="b" etype="UI32"/>
+ <description>Compare packed unsigned 32-bit integers in "a" and "b" for not-equal, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ k[j] := ( a[i+31:i] != b[i+31:i] ) ? 1 : 0
+ENDFOR
+k[MAX:16] := 0
+ </operation>
+ <instruction name="VPCMPUD" form="k, zmm, zmm, imm8" xed="VPCMPUD_MASKmskw_MASKmskw_ZMMu32_ZMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_cmp_epu32_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Compare</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__mmask16" varname="k1" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="__m512i" varname="b" etype="UI32"/>
+ <parameter type="_MM_CMPINT_ENUM" varname="imm8" etype="IMM" immtype="_MM_CMPINT"/>
+ <description>Compare packed unsigned 32-bit integers in "a" and "b" based on the comparison operand specified by "imm8", and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>CASE (imm8[2:0]) OF
+0: OP := _MM_CMPINT_EQ
+1: OP := _MM_CMPINT_LT
+2: OP := _MM_CMPINT_LE
+3: OP := _MM_CMPINT_FALSE
+4: OP := _MM_CMPINT_NE
+5: OP := _MM_CMPINT_NLT
+6: OP := _MM_CMPINT_NLE
+7: OP := _MM_CMPINT_TRUE
+ESAC
+FOR j := 0 to 15
+ i := j*32
+ IF k1[j]
+ k[j] := ( a[i+31:i] OP b[i+31:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:16] := 0
+ </operation>
+ <instruction name="VPCMPUD" form="k {k}, zmm, zmm, imm8" xed="VPCMPUD_MASKmskw_MASKmskw_ZMMu32_ZMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_cmpeq_epu32_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Compare</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__mmask16" varname="k1" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="__m512i" varname="b" etype="UI32"/>
+ <description>Compare packed unsigned 32-bit integers in "a" and "b" for equality, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k1[j]
+ k[j] := ( a[i+31:i] == b[i+31:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:16] := 0
+ </operation>
+ <instruction name="VPCMPUD" form="k {k}, zmm, zmm, imm8" xed="VPCMPUD_MASKmskw_MASKmskw_ZMMu32_ZMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_cmpge_epu32_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Compare</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__mmask16" varname="k1" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="__m512i" varname="b" etype="UI32"/>
+ <description>Compare packed unsigned 32-bit integers in "a" and "b" for greater-than-or-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k1[j]
+ k[j] := ( a[i+31:i] &gt;= b[i+31:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:16] := 0
+ </operation>
+ <instruction name="VPCMPUD" form="k {k}, zmm, zmm, imm8" xed="VPCMPUD_MASKmskw_MASKmskw_ZMMu32_ZMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_cmpgt_epu32_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Compare</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__mmask16" varname="k1" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="__m512i" varname="b" etype="UI32"/>
+ <description>Compare packed unsigned 32-bit integers in "a" and "b" for greater-than, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k1[j]
+ k[j] := ( a[i+31:i] &gt; b[i+31:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:16] := 0
+ </operation>
+ <instruction name="VPCMPUD" form="k {k}, zmm, zmm, imm8" xed="VPCMPUD_MASKmskw_MASKmskw_ZMMu32_ZMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_cmple_epu32_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Compare</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__mmask16" varname="k1" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="__m512i" varname="b" etype="UI32"/>
+ <description>Compare packed unsigned 32-bit integers in "a" and "b" for less-than-or-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k1[j]
+ k[j] := ( a[i+31:i] &lt;= b[i+31:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:16] := 0
+ </operation>
+ <instruction name="VPCMPUD" form="k {k}, zmm, zmm, imm8" xed="VPCMPUD_MASKmskw_MASKmskw_ZMMu32_ZMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_cmplt_epu32_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Compare</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__mmask16" varname="k1" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="__m512i" varname="b" etype="UI32"/>
+ <description>Compare packed unsigned 32-bit integers in "a" and "b" for less-than, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k1[j]
+ k[j] := ( a[i+31:i] &lt; b[i+31:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:16] := 0
+ </operation>
+ <instruction name="VPCMPUD" form="k {k}, zmm, zmm, imm8" xed="VPCMPUD_MASKmskw_MASKmskw_ZMMu32_ZMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_cmpneq_epu32_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Compare</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__mmask16" varname="k1" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="__m512i" varname="b" etype="UI32"/>
+ <description>Compare packed unsigned 32-bit integers in "a" and "b" for not-equal, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k1[j]
+ k[j] := ( a[i+31:i] != b[i+31:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:16] := 0
+ </operation>
+ <instruction name="VPCMPUD" form="k {k}, zmm, zmm, imm8" xed="VPCMPUD_MASKmskw_MASKmskw_ZMMu32_ZMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_permutevar_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="src" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="idx" etype="UI32"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <description>Shuffle 32-bit integers in "a" across lanes using the corresponding index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). Note that this intrinsic shuffles across 128-bit lanes, unlike past intrinsics that use the "permutevar" name. This intrinsic is identical to "_mm512_mask_permutexvar_epi32", and it is recommended that you use that intrinsic name.</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ id := idx[i+3:i]*32
+ IF k[j]
+ dst[i+31:i] := a[id+31:id]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPERMD" form="zmm {k}, zmm, zmm" xed="VPERMD_ZMMu32_MASKmskw_ZMMu32_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_permutevar_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="idx" etype="UI32"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <description>Shuffle 32-bit integers in "a" across lanes using the corresponding index in "idx", and store the results in "dst". Note that this intrinsic shuffles across 128-bit lanes, unlike past intrinsics that use the "permutevar" name. This intrinsic is identical to "_mm512_permutexvar_epi32", and it is recommended that you use that intrinsic name.</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ id := idx[i+3:i]*32
+ dst[i+31:i] := a[id+31:id]
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPERMD" form="zmm, zmm, zmm" xed="VPERMD_ZMMu32_MASKmskw_ZMMu32_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_i32gather_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Load</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="vindex" etype="SI32"/>
+ <parameter type="void const*" varname="base_addr" etype="UI32"/>
+ <parameter type="int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <description>Gather 32-bit integers from memory using 32-bit indices. 32-bit elements are loaded from addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst". "scale" should be 1, 2, 4 or 8.</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ m := j*32
+ addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
+ dst[i+31:i] := MEM[addr+31:addr]
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPGATHERDD" form="zmm, vm32z" xed="VPGATHERDD_ZMMu32_MASKmskw_MEMu32_AVX512_VL512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_i32gather_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Load</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="src" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="vindex" etype="SI32"/>
+ <parameter type="void const*" varname="base_addr" etype="UI32"/>
+ <parameter type="int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <description>Gather 32-bit integers from memory using 32-bit indices. 32-bit elements are loaded from addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8.</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ m := j*32
+ IF k[j]
+ addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
+ dst[i+31:i] := MEM[addr+31:addr]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPGATHERDD" form="zmm {k}, vm32z" xed="VPGATHERDD_ZMMu32_MASKmskw_MEMu32_AVX512_VL512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_max_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="src" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI32"/>
+ <parameter type="__m512i" varname="b" etype="SI32"/>
+ <description>Compare packed signed 32-bit integers in "a" and "b", and store packed maximum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := MAX(a[i+31:i], b[i+31:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMAXSD" form="zmm {k}, zmm, zmm" xed="VPMAXSD_ZMMi32_MASKmskw_ZMMi32_ZMMi32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_max_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="a" etype="SI32"/>
+ <parameter type="__m512i" varname="b" etype="SI32"/>
+ <description>Compare packed signed 32-bit integers in "a" and "b", and store packed maximum values in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := MAX(a[i+31:i], b[i+31:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMAXSD" form="zmm, zmm, zmm" xed="VPMAXSD_ZMMi32_MASKmskw_ZMMi32_ZMMi32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_max_epu32">
+ <type>Integer</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="src" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="__m512i" varname="b" etype="UI32"/>
+ <description>Compare packed unsigned 32-bit integers in "a" and "b", and store packed maximum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := MAX(a[i+31:i], b[i+31:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMAXUD" form="zmm {k}, zmm, zmm" xed="VPMAXUD_ZMMu32_MASKmskw_ZMMu32_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_max_epu32">
+ <type>Integer</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="__m512i" varname="b" etype="UI32"/>
+ <description>Compare packed unsigned 32-bit integers in "a" and "b", and store packed maximum values in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := MAX(a[i+31:i], b[i+31:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMAXUD" form="zmm, zmm, zmm" xed="VPMAXUD_ZMMu32_MASKmskw_ZMMu32_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_min_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="src" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI32"/>
+ <parameter type="__m512i" varname="b" etype="SI32"/>
+ <description>Compare packed signed 32-bit integers in "a" and "b", and store packed minimum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := MIN(a[i+31:i], b[i+31:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMINSD" form="zmm {k}, zmm, zmm" xed="VPMINSD_ZMMi32_MASKmskw_ZMMi32_ZMMi32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_min_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="a" etype="SI32"/>
+ <parameter type="__m512i" varname="b" etype="SI32"/>
+ <description>Compare packed signed 32-bit integers in "a" and "b", and store packed minimum values in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := MIN(a[i+31:i], b[i+31:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMINSD" form="zmm, zmm, zmm" xed="VPMINSD_ZMMi32_MASKmskw_ZMMi32_ZMMi32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_min_epu32">
+ <type>Integer</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="src" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="__m512i" varname="b" etype="UI32"/>
+ <description>Compare packed unsigned 32-bit integers in "a" and "b", and store packed minimum values in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := MIN(a[i+31:i], b[i+31:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMINUD" form="zmm {k}, zmm, zmm" xed="VPMINUD_ZMMu32_MASKmskw_ZMMu32_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_min_epu32">
+ <type>Integer</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="__m512i" varname="b" etype="UI32"/>
+ <description>Compare packed unsigned 32-bit integers in "a" and "b", and store packed minimum values in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := MIN(a[i+31:i], b[i+31:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMINUD" form="zmm, zmm, zmm" xed="VPMINUD_ZMMu32_MASKmskw_ZMMu32_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_mullo_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="src" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="__m512i" varname="b" etype="UI32"/>
+ <description>Multiply the packed 32-bit integers in "a" and "b", producing intermediate 64-bit integers, and store the low 32 bits of the intermediate integers in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ tmp[63:0] := a[i+31:i] * b[i+31:i]
+ dst[i+31:i] := tmp[31:0]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMULLD" form="zmm {k}, zmm, zmm" xed="VPMULLD_ZMMu32_MASKmskw_ZMMu32_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mullo_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="__m512i" varname="b" etype="UI32"/>
+ <description>Multiply the packed 32-bit integers in "a" and "b", producing intermediate 64-bit integers, and store the low 32 bits of the intermediate integers in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ tmp[63:0] := a[i+31:i] * b[i+31:i]
+ dst[i+31:i] := tmp[31:0]
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMULLD" form="zmm, zmm, zmm" xed="VPMULLD_ZMMu32_MASKmskw_ZMMu32_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_or_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Logical</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="src" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="__m512i" varname="b" etype="UI32"/>
+ <description>Compute the bitwise OR of packed 32-bit integers in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := a[i+31:i] OR b[i+31:i]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPORD" form="zmm {k}, zmm, zmm" xed="VPORD_ZMMu32_MASKmskw_ZMMu32_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_or_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Logical</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="__m512i" varname="b" etype="UI32"/>
+ <description>Compute the bitwise OR of packed 32-bit integers in "a" and "b", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := a[i+31:i] OR b[i+31:i]
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPORD" form="zmm, zmm, zmm" xed="VPORD_ZMMu32_MASKmskw_ZMMu32_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_or_si512">
+ <type>Integer</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Logical</category>
+ <return type="__m512i" varname="dst" etype="M512"/>
+ <parameter type="__m512i" varname="a" etype="M512"/>
+ <parameter type="__m512i" varname="b" etype="M512"/>
+ <description>Compute the bitwise OR of 512 bits (representing integer data) in "a" and "b", and store the result in "dst".</description>
+ <operation>
+dst[511:0] := (a[511:0] OR b[511:0])
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPORD" form="zmm, zmm, zmm" xed="VPORD_ZMMu32_MASKmskw_ZMMu32_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_or_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Logical</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="__m512i" varname="b" etype="UI64"/>
+ <description>Compute the bitwise OR of packed 64-bit integers in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := a[i+63:i] OR b[i+63:i]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPORQ" form="zmm {k}, zmm, zmm" xed="VPORQ_ZMMu64_MASKmskw_ZMMu64_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_or_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Logical</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="__m512i" varname="b" etype="UI64"/>
+ <description>Compute the bitwise OR of packed 64-bit integers in "a" and "b", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ dst[i+63:i] := a[i+63:i] OR b[i+63:i]
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPORQ" form="zmm, zmm, zmm" xed="VPORQ_ZMMu64_MASKmskw_ZMMu64_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_i32scatter_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="base_addr" etype="UI32"/>
+ <parameter type="__m512i" varname="vindex" etype="SI32"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <description>Scatter 32-bit integers from "a" into memory using 32-bit indices. 32-bit elements are stored at addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). "scale" should be 1, 2, 4 or 8.</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ m := j*32
+ addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
+ MEM[addr+31:addr] := a[i+31:i]
+ENDFOR
+ </operation>
+ <instruction name="VPSCATTERDD" form="vm32z, zmm" xed="VPSCATTERDD_MEMu32_MASKmskw_ZMMu32_AVX512_VL512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_i32scatter_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="base_addr" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="vindex" etype="SI32"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <description>Scatter 32-bit integers from "a" into memory using 32-bit indices. 32-bit elements are stored at addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale") subject to mask "k" (elements are not stored when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8.</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ m := j*32
+ IF k[j]
+ addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
+ MEM[addr+31:addr] := a[i+31:i]
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VPSCATTERDD" form="vm32z {k}, zmm" xed="VPSCATTERDD_MEMu32_MASKmskw_ZMMu32_AVX512_VL512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_shuffle_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="src" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="_MM_PERM_ENUM" varname="imm8" etype="IMM" immtype="_MM_PERM"/>
+ <description>Shuffle 32-bit integers in "a" within 128-bit lanes using the control in "imm8", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE SELECT4(src, control) {
+ CASE(control[1:0]) OF
+ 0: tmp[31:0] := src[31:0]
+ 1: tmp[31:0] := src[63:32]
+ 2: tmp[31:0] := src[95:64]
+ 3: tmp[31:0] := src[127:96]
+ ESAC
+ RETURN tmp[31:0]
+}
+tmp_dst[31:0] := SELECT4(a[127:0], imm8[1:0])
+tmp_dst[63:32] := SELECT4(a[127:0], imm8[3:2])
+tmp_dst[95:64] := SELECT4(a[127:0], imm8[5:4])
+tmp_dst[127:96] := SELECT4(a[127:0], imm8[7:6])
+tmp_dst[159:128] := SELECT4(a[255:128], imm8[1:0])
+tmp_dst[191:160] := SELECT4(a[255:128], imm8[3:2])
+tmp_dst[223:192] := SELECT4(a[255:128], imm8[5:4])
+tmp_dst[255:224] := SELECT4(a[255:128], imm8[7:6])
+tmp_dst[287:256] := SELECT4(a[383:256], imm8[1:0])
+tmp_dst[319:288] := SELECT4(a[383:256], imm8[3:2])
+tmp_dst[351:320] := SELECT4(a[383:256], imm8[5:4])
+tmp_dst[383:352] := SELECT4(a[383:256], imm8[7:6])
+tmp_dst[415:384] := SELECT4(a[511:384], imm8[1:0])
+tmp_dst[447:416] := SELECT4(a[511:384], imm8[3:2])
+tmp_dst[479:448] := SELECT4(a[511:384], imm8[5:4])
+tmp_dst[511:480] := SELECT4(a[511:384], imm8[7:6])
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := tmp_dst[i+31:i]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSHUFD" form="zmm {k}, zmm, imm8" xed="VPSHUFD_ZMMu32_MASKmskw_ZMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_shuffle_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="_MM_PERM_ENUM" varname="imm8" etype="IMM" immtype="_MM_PERM"/>
+ <description>Shuffle 32-bit integers in "a" within 128-bit lanes using the control in "imm8", and store the results in "dst".</description>
+ <operation>
+DEFINE SELECT4(src, control) {
+ CASE(control[1:0]) OF
+ 0: tmp[31:0] := src[31:0]
+ 1: tmp[31:0] := src[63:32]
+ 2: tmp[31:0] := src[95:64]
+ 3: tmp[31:0] := src[127:96]
+ ESAC
+ RETURN tmp[31:0]
+}
+dst[31:0] := SELECT4(a[127:0], imm8[1:0])
+dst[63:32] := SELECT4(a[127:0], imm8[3:2])
+dst[95:64] := SELECT4(a[127:0], imm8[5:4])
+dst[127:96] := SELECT4(a[127:0], imm8[7:6])
+dst[159:128] := SELECT4(a[255:128], imm8[1:0])
+dst[191:160] := SELECT4(a[255:128], imm8[3:2])
+dst[223:192] := SELECT4(a[255:128], imm8[5:4])
+dst[255:224] := SELECT4(a[255:128], imm8[7:6])
+dst[287:256] := SELECT4(a[383:256], imm8[1:0])
+dst[319:288] := SELECT4(a[383:256], imm8[3:2])
+dst[351:320] := SELECT4(a[383:256], imm8[5:4])
+dst[383:352] := SELECT4(a[383:256], imm8[7:6])
+dst[415:384] := SELECT4(a[511:384], imm8[1:0])
+dst[447:416] := SELECT4(a[511:384], imm8[3:2])
+dst[479:448] := SELECT4(a[511:384], imm8[5:4])
+dst[511:480] := SELECT4(a[511:384], imm8[7:6])
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSHUFD" form="zmm, zmm, imm8" xed="VPSHUFD_ZMMu32_MASKmskw_ZMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_slli_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="src" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="unsigned int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shift packed 32-bit integers in "a" left by "imm8" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ IF imm8[7:0] &gt; 31
+ dst[i+31:i] := 0
+ ELSE
+ dst[i+31:i] := ZeroExtend32(a[i+31:i] &lt;&lt; imm8[7:0])
+ FI
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSLLD" form="zmm {k}, zmm, imm8" xed="VPSLLD_ZMMu32_MASKmskw_ZMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_slli_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="unsigned int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shift packed 32-bit integers in "a" left by "imm8" while shifting in zeros, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF imm8[7:0] &gt; 31
+ dst[i+31:i] := 0
+ ELSE
+ dst[i+31:i] := ZeroExtend32(a[i+31:i] &lt;&lt; imm8[7:0])
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSLLD" form="zmm, zmm, imm8" xed="VPSLLD_ZMMu32_MASKmskw_ZMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_sllv_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="src" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="__m512i" varname="count" etype="UI32"/>
+ <description>Shift packed 32-bit integers in "a" left by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ IF count[i+31:i] &lt; 32
+ dst[i+31:i] := ZeroExtend32(a[i+31:i] &lt;&lt; count[i+31:i])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSLLVD" form="zmm {k}, zmm, zmm" xed="VPSLLVD_ZMMu32_MASKmskw_ZMMu32_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_sllv_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="__m512i" varname="count" etype="UI32"/>
+ <description>Shift packed 32-bit integers in "a" left by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF count[i+31:i] &lt; 32
+ dst[i+31:i] := ZeroExtend32(a[i+31:i] &lt;&lt; count[i+31:i])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSLLVD" form="zmm, zmm, zmm" xed="VPSLLVD_ZMMu32_MASKmskw_ZMMu32_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_srai_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="src" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI32"/>
+ <parameter type="unsigned int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shift packed 32-bit integers in "a" right by "imm8" while shifting in sign bits, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ IF imm8[7:0] &gt; 31
+ dst[i+31:i] := (a[i+31] ? 0xFFFFFFFF : 0x0)
+ ELSE
+ dst[i+31:i] := SignExtend32(a[i+31:i] &gt;&gt; imm8[7:0])
+ FI
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSRAD" form="zmm {k}, zmm, imm8" xed="VPSRAD_ZMMu32_MASKmskw_ZMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_srai_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="a" etype="SI32"/>
+ <parameter type="unsigned int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shift packed 32-bit integers in "a" right by "imm8" while shifting in sign bits, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF imm8[7:0] &gt; 31
+ dst[i+31:i] := (a[i+31] ? 0xFFFFFFFF : 0x0)
+ ELSE
+ dst[i+31:i] := SignExtend32(a[i+31:i] &gt;&gt; imm8[7:0])
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSRAD" form="zmm, zmm, imm8" xed="VPSRAD_ZMMu32_MASKmskw_ZMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_srav_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="src" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI32"/>
+ <parameter type="__m512i" varname="count" etype="UI32"/>
+ <description>Shift packed 32-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in sign bits, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ IF count[i+31:i] &lt; 32
+ dst[i+31:i] := SignExtend32(a[i+31:i] &gt;&gt; count[i+31:i])
+ ELSE
+ dst[i+31:i] := (a[i+31] ? 0xFFFFFFFF : 0)
+ FI
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSRAVD" form="zmm {k}, zmm, zmm" xed="VPSRAVD_ZMMu32_MASKmskw_ZMMu32_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_srav_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="a" etype="SI32"/>
+ <parameter type="__m512i" varname="count" etype="UI32"/>
+ <description>Shift packed 32-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in sign bits, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF count[i+31:i] &lt; 32
+ dst[i+31:i] := SignExtend32(a[i+31:i] &gt;&gt; count[i+31:i])
+ ELSE
+ dst[i+31:i] := (a[i+31] ? 0xFFFFFFFF : 0)
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSRAVD" form="zmm, zmm, zmm" xed="VPSRAVD_ZMMu32_MASKmskw_ZMMu32_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_srli_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="src" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="unsigned int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shift packed 32-bit integers in "a" right by "imm8" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ IF imm8[7:0] &gt; 31
+ dst[i+31:i] := 0
+ ELSE
+ dst[i+31:i] := ZeroExtend32(a[i+31:i] &gt;&gt; imm8[7:0])
+ FI
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSRLD" form="zmm {k}, zmm, imm8" xed="VPSRLD_ZMMu32_MASKmskw_ZMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_srli_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="unsigned int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shift packed 32-bit integers in "a" right by "imm8" while shifting in zeros, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF imm8[7:0] &gt; 31
+ dst[i+31:i] := 0
+ ELSE
+ dst[i+31:i] := ZeroExtend32(a[i+31:i] &gt;&gt; imm8[7:0])
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSRLD" form="zmm, zmm, imm8" xed="VPSRLD_ZMMu32_MASKmskw_ZMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_srlv_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="src" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="__m512i" varname="count" etype="UI32"/>
+ <description>Shift packed 32-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ IF count[i+31:i] &lt; 32
+ dst[i+31:i] := ZeroExtend32(a[i+31:i] &gt;&gt; count[i+31:i])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSRLVD" form="zmm {k}, zmm, zmm" xed="VPSRLVD_ZMMu32_MASKmskw_ZMMu32_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_srlv_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="__m512i" varname="count" etype="UI32"/>
+ <description>Shift packed 32-bit integers in "a" right by the amount specified by the corresponding element in "count" while shifting in zeros, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF count[i+31:i] &lt; 32
+ dst[i+31:i] := ZeroExtend32(a[i+31:i] &gt;&gt; count[i+31:i])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSRLVD" form="zmm, zmm, zmm" xed="VPSRLVD_ZMMu32_MASKmskw_ZMMu32_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_sub_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="src" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="__m512i" varname="b" etype="UI32"/>
+ <description>Subtract packed 32-bit integers in "b" from packed 32-bit integers in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := a[i+31:i] - b[i+31:i]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSUBD" form="zmm {k}, zmm, zmm" xed="VPSUBD_ZMMu32_MASKmskw_ZMMu32_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_sub_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="__m512i" varname="b" etype="UI32"/>
+ <description>Subtract packed 32-bit integers in "b" from packed 32-bit integers in "a", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := a[i+31:i] - b[i+31:i]
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSUBD" form="zmm, zmm, zmm" xed="VPSUBD_ZMMu32_MASKmskw_ZMMu32_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_test_epi32_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Logical</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__mmask16" varname="k1" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="__m512i" varname="b" etype="UI32"/>
+ <description>Compute the bitwise AND of packed 32-bit integers in "a" and "b", producing intermediate 32-bit values, and set the corresponding bit in result mask "k" (subject to writemask "k") if the intermediate value is non-zero.</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k1[j]
+ k[j] := ((a[i+31:i] AND b[i+31:i]) != 0) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:16] := 0
+ </operation>
+ <instruction name="VPTESTMD" form="k {k}, zmm, zmm" xed="VPTESTMD_MASKmskw_MASKmskw_ZMMu32_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_test_epi32_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Logical</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="__m512i" varname="b" etype="UI32"/>
+ <description>Compute the bitwise AND of packed 32-bit integers in "a" and "b", producing intermediate 32-bit values, and set the corresponding bit in result mask "k" if the intermediate value is non-zero.</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ k[j] := ((a[i+31:i] AND b[i+31:i]) != 0) ? 1 : 0
+ENDFOR
+k[MAX:16] := 0
+ </operation>
+ <instruction name="VPTESTMD" form="k, zmm, zmm" xed="VPTESTMD_MASKmskw_MASKmskw_ZMMu32_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_xor_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Logical</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="src" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="__m512i" varname="b" etype="UI32"/>
+ <description>Compute the bitwise XOR of packed 32-bit integers in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := a[i+31:i] XOR b[i+31:i]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPXORD" form="zmm {k}, zmm, zmm" xed="VPXORD_ZMMu32_MASKmskw_ZMMu32_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_xor_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Logical</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="__m512i" varname="b" etype="UI32"/>
+ <description>Compute the bitwise XOR of packed 32-bit integers in "a" and "b", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := a[i+31:i] XOR b[i+31:i]
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPXORD" form="zmm, zmm, zmm" xed="VPXORD_ZMMu32_MASKmskw_ZMMu32_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_xor_si512">
+ <type>Integer</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Logical</category>
+ <return type="__m512i" varname="dst" etype="M512"/>
+ <parameter type="__m512i" varname="a" etype="M512"/>
+ <parameter type="__m512i" varname="b" etype="M512"/>
+ <description>Compute the bitwise XOR of 512 bits (representing integer data) in "a" and "b", and store the result in "dst".</description>
+ <operation>
+dst[511:0] := (a[511:0] XOR b[511:0])
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPXORD" form="zmm, zmm, zmm" xed="VPXORD_ZMMu32_MASKmskw_ZMMu32_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_xor_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Logical</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="__m512i" varname="b" etype="UI64"/>
+ <description>Compute the bitwise XOR of packed 64-bit integers in "a" and "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := a[i+63:i] XOR b[i+63:i]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPXORQ" form="zmm {k}, zmm, zmm" xed="VPXORQ_ZMMu64_MASKmskw_ZMMu64_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_xor_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Logical</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="__m512i" varname="b" etype="UI64"/>
+ <description>Compute the bitwise XOR of packed 64-bit integers in "a" and "b", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ dst[i+63:i] := a[i+63:i] XOR b[i+63:i]
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPXORQ" form="zmm, zmm, zmm" xed="VPXORQ_ZMMu64_MASKmskw_ZMMu64_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_i32scatter_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="base_addr" etype="FP32"/>
+ <parameter type="__m512i" varname="vindex" etype="SI32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <description>Scatter single-precision (32-bit) floating-point elements from "a" into memory using 32-bit indices. 32-bit elements are stored at addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). "scale" should be 1, 2, 4 or 8.</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ m := j*32
+ addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale)
+ MEM[addr+31:addr] := a[i+31:i]
+ENDFOR
+ </operation>
+ <instruction name="VSCATTERDPS" form="vm32z, zmm" xed="VSCATTERDPS_MEMf32_MASKmskw_ZMMf32_AVX512_VL512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_i32scatter_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="base_addr" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="vindex" etype="SI32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <description>Scatter single-precision (32-bit) floating-point elements from "a" into memory using 32-bit indices. 32-bit elements are stored at addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale") subject to mask "k" (elements are not stored when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8.</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ m := j*32
+ IF k[j]
+ addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale)
+ MEM[addr+31:addr] := a[i+31:i]
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VSCATTERDPS" form="vm32z {k}, zmm" xed="VSCATTERDPS_MEMf32_MASKmskw_ZMMf32_AVX512_VL512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_sub_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <description>Subtract packed double-precision (64-bit) floating-point elements in "b" from packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := a[i+63:i] - b[i+63:i]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VSUBPD" form="zmm {k}, zmm, zmm" xed="VSUBPD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_sub_round_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Subtract packed double-precision (64-bit) floating-point elements in "b" from packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+ [round_note]</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := a[i+63:i] - b[i+63:i]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VSUBPD" form="zmm {k}, zmm, zmm {er}" xed="VSUBPD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_sub_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <description>Subtract packed double-precision (64-bit) floating-point elements in "b" from packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ dst[i+63:i] := a[i+63:i] - b[i+63:i]
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VSUBPD" form="zmm, zmm, zmm" xed="VSUBPD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_sub_round_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Subtract packed double-precision (64-bit) floating-point elements in "b" from packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst".
+ [round_note]</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ dst[i+63:i] := a[i+63:i] - b[i+63:i]
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VSUBPD" form="zmm, zmm, zmm {er}" xed="VSUBPD_ZMMf64_MASKmskw_ZMMf64_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_sub_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <description>Subtract packed single-precision (32-bit) floating-point elements in "b" from packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := a[i+31:i] - b[i+31:i]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VSUBPS" form="zmm {k}, zmm, zmm" xed="VSUBPS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_sub_round_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Subtract packed single-precision (32-bit) floating-point elements in "b" from packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+ [round_note]</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := a[i+31:i] - b[i+31:i]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VSUBPS" form="zmm {k}, zmm, zmm {er}" xed="VSUBPS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_sub_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <description>Subtract packed single-precision (32-bit) floating-point elements in "b" from packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := a[i+31:i] - b[i+31:i]
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VSUBPS" form="zmm, zmm, zmm" xed="VSUBPS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_sub_round_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Subtract packed single-precision (32-bit) floating-point elements in "b" from packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst".
+ [round_note]</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := a[i+31:i] - b[i+31:i]
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VSUBPS" form="zmm, zmm, zmm {er}" xed="VSUBPS_ZMMf32_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_castpd_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Cast</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Cast vector of type __m512d to type __m512.
+ This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency.</description>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_castpd_si512">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Cast</category>
+ <return type="__m512i" varname="dst" etype="M512"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Cast vector of type __m512d to type __m512i.
+ This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency.</description>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_castps_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Cast</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Cast vector of type __m512 to type __m512d.
+ This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency.</description>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_castps_si512">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Cast</category>
+ <return type="__m512i" varname="dst" etype="M512"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Cast vector of type __m512 to type __m512i.
+ This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency.</description>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_castsi512_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Cast</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <description>Cast vector of type __m512i to type __m512d.
+ This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency.</description>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_castsi512_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Cast</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <description>Cast vector of type __m512i to type __m512.
+ This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency.</description>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" sequence="TRUE" name="_mm512_mask_reduce_add_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="int" varname="dst" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <description>Reduce the packed 32-bit integers in "a" by addition using mask "k". Returns the sum of all active elements in "a".</description>
+ <operation>
+dst[31:0] := 0
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[31:0] := dst[31:0] + a[i+31:i]
+ FI
+ENDFOR
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" sequence="TRUE" name="_mm512_mask_reduce_add_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__int64" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <description>Reduce the packed 64-bit integers in "a" by addition using mask "k". Returns the sum of all active elements in "a".</description>
+ <operation>
+dst[63:0] := 0
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[63:0] := dst[63:0] + a[i+63:i]
+ FI
+ENDFOR
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" sequence="TRUE" name="_mm512_mask_reduce_add_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="double" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Reduce the packed double-precision (64-bit) floating-point elements in "a" by addition using mask "k". Returns the sum of all active elements in "a".</description>
+ <operation>
+dst[63:0] := 0.0
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[63:0] := dst[63:0] + a[i+63:i]
+ FI
+ENDFOR
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" sequence="TRUE" name="_mm512_mask_reduce_add_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="float" varname="dst" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Reduce the packed single-precision (32-bit) floating-point elements in "a" by addition using mask "k". Returns the sum of all active elements in "a".</description>
+ <operation>
+dst[31:0] := 0.0
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[31:0] := dst[31:0] + a[i+31:i]
+ FI
+ENDFOR
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" sequence="TRUE" name="_mm512_mask_reduce_and_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Logical</category>
+ <return type="int" varname="dst" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <description>Reduce the packed 32-bit integers in "a" by bitwise AND using mask "k". Returns the bitwise AND of all active elements in "a".</description>
+ <operation>
+dst[31:0] := 0xFFFFFFFF
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[31:0] := dst[31:0] AND a[i+31:i]
+ FI
+ENDFOR
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" sequence="TRUE" name="_mm512_mask_reduce_and_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Logical</category>
+ <return type="__int64" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <description>Reduce the packed 64-bit integers in "a" by bitwise AND using mask "k". Returns the bitwise AND of all active elements in "a".</description>
+ <operation>
+dst[63:0] := 0xFFFFFFFFFFFFFFFF
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[63:0] := dst[63:0] AND a[i+63:i]
+ FI
+ENDFOR
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" sequence="TRUE" name="_mm512_mask_reduce_max_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Special Math Functions</category>
+ <return type="int" varname="dst" etype="SI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI32"/>
+ <description>Reduce the packed signed 32-bit integers in "a" by maximum using mask "k". Returns the maximum of all active elements in "a".</description>
+ <operation>
+dst[31:0] := Int32(-0x80000000)
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[31:0] := (dst[31:0] &gt; a[i+31:i] ? dst[31:0] : a[i+31:i])
+ FI
+ENDFOR
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" sequence="TRUE" name="_mm512_mask_reduce_max_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__int64" varname="dst" etype="SI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI64"/>
+ <description>Reduce the packed signed 64-bit integers in "a" by maximum using mask "k". Returns the maximum of all active elements in "a".</description>
+ <operation>
+dst[63:0] := Int64(-0x8000000000000000)
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[63:0] := (dst[63:0] &gt; a[i+63:i] ? dst[63:0] : a[i+63:i])
+ FI
+ENDFOR
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" sequence="TRUE" name="_mm512_mask_reduce_max_epu32">
+ <type>Integer</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Special Math Functions</category>
+ <return type="unsigned int" varname="dst" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <description>Reduce the packed unsigned 32-bit integers in "a" by maximum using mask "k". Returns the maximum of all active elements in "a".</description>
+ <operation>
+dst[31:0] := 0
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[31:0] := (dst[31:0] &gt; a[i+31:i] ? dst[31:0] : a[i+31:i])
+ FI
+ENDFOR
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" sequence="TRUE" name="_mm512_mask_reduce_max_epu64">
+ <type>Integer</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Special Math Functions</category>
+ <return type="unsigned __int64" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <description>Reduce the packed unsigned 64-bit integers in "a" by maximum using mask "k". Returns the maximum of all active elements in "a".</description>
+ <operation>
+dst[63:0] := 0
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[63:0] := (dst[63:0] &gt; a[i+63:i] ? dst[63:0] : a[i+63:i])
+ FI
+ENDFOR
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" sequence="TRUE" name="_mm512_mask_reduce_max_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Special Math Functions</category>
+ <return type="double" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Reduce the packed double-precision (64-bit) floating-point elements in "a" by maximum using mask "k". Returns the maximum of all active elements in "a".</description>
+ <operation>
+dst[63:0] := Cast_FP64(0xFFEFFFFFFFFFFFFF)
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[63:0] := (dst[63:0] &gt; a[i+63:i] ? dst[63:0] : a[i+63:i])
+ FI
+ENDFOR
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" sequence="TRUE" name="_mm512_mask_reduce_max_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Special Math Functions</category>
+ <return type="float" varname="dst" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Reduce the packed single-precision (32-bit) floating-point elements in "a" by maximum using mask "k". Returns the maximum of all active elements in "a".</description>
+ <operation>
+dst[31:0] := Cast_FP32(0xFF7FFFFF)
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[31:0] := (dst[31:0] &gt; a[i+31:i] ? dst[31:0] : a[i+31:i])
+ FI
+ENDFOR
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" sequence="TRUE" name="_mm512_mask_reduce_min_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Special Math Functions</category>
+ <return type="int" varname="dst" etype="SI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI32"/>
+ <description>Reduce the packed signed 32-bit integers in "a" by minimum using mask "k". Returns the minimum of all active elements in "a".</description>
+ <operation>
+dst[31:0] := Int32(0x7FFFFFFF)
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[31:0] := (dst[31:0] &lt; a[i+31:i] ? dst[31:0] : a[i+31:i])
+ FI
+ENDFOR
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" sequence="TRUE" name="_mm512_mask_reduce_min_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__int64" varname="dst" etype="SI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI64"/>
+ <description>Reduce the packed signed 64-bit integers in "a" by minimum using mask "k". Returns the minimum of all active elements in "a".</description>
+ <operation>
+dst[63:0] := Int64(0x7FFFFFFFFFFFFFFF)
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[63:0] := (dst[63:0] &lt; a[i+63:i] ? dst[63:0] : a[i+63:i])
+ FI
+ENDFOR
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" sequence="TRUE" name="_mm512_mask_reduce_min_epu32">
+ <type>Integer</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Special Math Functions</category>
+ <return type="unsigned int" varname="dst" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <description>Reduce the packed unsigned 32-bit integers in "a" by minimum using mask "k". Returns the minimum of all active elements in "a".</description>
+ <operation>
+dst[31:0] := 0xFFFFFFFF
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[31:0] := (dst[31:0] &lt; a[i+31:i] ? dst[31:0] : a[i+31:i])
+ FI
+ENDFOR
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" sequence="TRUE" name="_mm512_mask_reduce_min_epu64">
+ <type>Integer</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Special Math Functions</category>
+ <return type="unsigned __int64" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <description>Reduce the packed unsigned 64-bit integers in "a" by minimum using mask "k". Returns the minimum of all active elements in "a".</description>
+ <operation>
+dst[63:0] := 0xFFFFFFFFFFFFFFFF
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[63:0] := (dst[63:0] &lt; a[i+63:i] ? dst[63:0] : a[i+63:i])
+ FI
+ENDFOR
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" sequence="TRUE" name="_mm512_mask_reduce_min_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Special Math Functions</category>
+ <return type="double" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Reduce the packed double-precision (64-bit) floating-point elements in "a" by minimum using mask "k". Returns the minimum of all active elements in "a".</description>
+ <operation>
+dst[63:0] := Cast_FP64(0x7FEFFFFFFFFFFFFF)
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[63:0] := (dst[63:0] &lt; a[i+63:i] ? dst[63:0] : a[i+63:i])
+ FI
+ENDFOR
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" sequence="TRUE" name="_mm512_mask_reduce_min_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Special Math Functions</category>
+ <return type="float" varname="dst" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Reduce the packed single-precision (32-bit) floating-point elements in "a" by minimum using mask "k". Returns the minimum of all active elements in "a".</description>
+ <operation>
+dst[31:0] := Cast_FP32(0x7F7FFFFF)
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[31:0] := (dst[31:0] &lt; a[i+31:i] ? dst[31:0] : a[i+31:i])
+ FI
+ENDFOR
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" sequence="TRUE" name="_mm512_mask_reduce_mul_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="int" varname="dst" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <description>Reduce the packed 32-bit integers in "a" by multiplication using mask "k". Returns the product of all active elements in "a".</description>
+ <operation>
+dst[31:0] := 1
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[31:0] := dst[31:0] * a[i+31:i]
+ FI
+ENDFOR
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" sequence="TRUE" name="_mm512_mask_reduce_mul_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__int64" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <description>Reduce the packed 64-bit integers in "a" by multiplication using mask "k". Returns the product of all active elements in "a".</description>
+ <operation>
+dst[63:0] := 1
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[63:0] := dst[63:0] * a[i+63:i]
+ FI
+ENDFOR
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" sequence="TRUE" name="_mm512_mask_reduce_mul_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="double" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Reduce the packed double-precision (64-bit) floating-point elements in "a" by multiplication using mask "k". Returns the product of all active elements in "a".</description>
+ <operation>
+dst[63:0] := 1.0
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[63:0] := dst[63:0] * a[i+63:i]
+ FI
+ENDFOR
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" sequence="TRUE" name="_mm512_mask_reduce_mul_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="float" varname="dst" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Reduce the packed single-precision (32-bit) floating-point elements in "a" by multiplication using mask "k". Returns the product of all active elements in "a".</description>
+ <operation>
+dst[31:0] := FP32(1.0)
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[31:0] := dst[31:0] * a[i+31:i]
+ FI
+ENDFOR
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" sequence="TRUE" name="_mm512_mask_reduce_or_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Logical</category>
+ <return type="int" varname="dst" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <description>Reduce the packed 32-bit integers in "a" by bitwise OR using mask "k". Returns the bitwise OR of all active elements in "a".</description>
+ <operation>
+dst[31:0] := 0
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[31:0] := dst[31:0] OR a[i+31:i]
+ FI
+ENDFOR
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" sequence="TRUE" name="_mm512_mask_reduce_or_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Logical</category>
+ <return type="__int64" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <description>Reduce the packed 64-bit integers in "a" by bitwise OR using mask "k". Returns the bitwise OR of all active elements in "a".</description>
+ <operation>
+dst[63:0] := 0
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[63:0] := dst[63:0] OR a[i+63:i]
+ FI
+ENDFOR
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" sequence="TRUE" name="_mm512_reduce_add_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="int" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <description>Reduce the packed 32-bit integers in "a" by addition. Returns the sum of all elements in "a".</description>
+ <operation>
+dst[31:0] := 0
+FOR j := 0 to 15
+ i := j*32
+ dst[31:0] := dst[31:0] + a[i+31:i]
+ENDFOR
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" sequence="TRUE" name="_mm512_reduce_add_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__int64" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <description>Reduce the packed 64-bit integers in "a" by addition. Returns the sum of all elements in "a".</description>
+ <operation>
+dst[63:0] := 0
+FOR j := 0 to 7
+ i := j*64
+ dst[63:0] := dst[63:0] + a[i+63:i]
+ENDFOR
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" sequence="TRUE" name="_mm512_reduce_add_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="double" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Reduce the packed double-precision (64-bit) floating-point elements in "a" by addition. Returns the sum of all elements in "a".</description>
+ <operation>
+dst[63:0] := 0.0
+FOR j := 0 to 7
+ i := j*64
+ dst[63:0] := dst[63:0] + a[i+63:i]
+ENDFOR
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" sequence="TRUE" name="_mm512_reduce_add_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="float" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Reduce the packed single-precision (32-bit) floating-point elements in "a" by addition. Returns the sum of all elements in "a".</description>
+ <operation>
+dst[31:0] := 0.0
+FOR j := 0 to 15
+ i := j*32
+ dst[31:0] := dst[31:0] + a[i+31:i]
+ENDFOR
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" sequence="TRUE" name="_mm512_reduce_and_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Logical</category>
+ <return type="int" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <description>Reduce the packed 32-bit integers in "a" by bitwise AND. Returns the bitwise AND of all elements in "a".</description>
+ <operation>
+dst[31:0] := 0xFFFFFFFF
+FOR j := 0 to 15
+ i := j*32
+ dst[31:0] := dst[31:0] AND a[i+31:i]
+ENDFOR
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" sequence="TRUE" name="_mm512_reduce_and_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Logical</category>
+ <return type="__int64" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <description>Reduce the packed 64-bit integers in "a" by bitwise AND. Returns the bitwise AND of all elements in "a".</description>
+ <operation>
+dst[63:0] := 0xFFFFFFFFFFFFFFFF
+FOR j := 0 to 7
+ i := j*64
+ dst[63:0] := dst[63:0] AND a[i+63:i]
+ENDFOR
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" sequence="TRUE" name="_mm512_reduce_max_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Special Math Functions</category>
+ <return type="int" varname="dst" etype="SI32"/>
+ <parameter type="__m512i" varname="a" etype="SI32"/>
+ <description>Reduce the packed signed 32-bit integers in "a" by maximum. Returns the maximum of all elements in "a".</description>
+ <operation>
+dst[31:0] := Int32(-0x80000000)
+FOR j := 0 to 15
+ i := j*32
+ dst[31:0] := (dst[31:0] &gt; a[i+31:i] ? dst[31:0] : a[i+31:i])
+ENDFOR
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" sequence="TRUE" name="_mm512_reduce_max_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__int64" varname="dst" etype="SI64"/>
+ <parameter type="__m512i" varname="a" etype="SI64"/>
+ <description>Reduce the packed signed 64-bit integers in "a" by maximum. Returns the maximum of all elements in "a".</description>
+ <operation>
+dst[63:0] := Int64(-0x8000000000000000)
+FOR j := 0 to 7
+ i := j*64
+ dst[63:0] := (dst[63:0] &gt; a[i+63:i] ? dst[63:0] : a[i+63:i])
+ENDFOR
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" sequence="TRUE" name="_mm512_reduce_max_epu32">
+ <type>Integer</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Special Math Functions</category>
+ <return type="unsigned int" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <description>Reduce the packed unsigned 32-bit integers in "a" by maximum. Returns the maximum of all elements in "a".</description>
+ <operation>
+dst[31:0] := 0
+FOR j := 0 to 15
+ i := j*32
+ dst[31:0] := (dst[31:0] &gt; a[i+31:i] ? dst[31:0] : a[i+31:i])
+ENDFOR
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" sequence="TRUE" name="_mm512_reduce_max_epu64">
+ <type>Integer</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Special Math Functions</category>
+ <return type="unsigned __int64" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <description>Reduce the packed unsigned 64-bit integers in "a" by maximum. Returns the maximum of all elements in "a".</description>
+ <operation>
+dst[63:0] := 0
+FOR j := 0 to 7
+ i := j*64
+ dst[63:0] := (dst[63:0] &gt; a[i+63:i] ? dst[63:0] : a[i+63:i])
+ENDFOR
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" sequence="TRUE" name="_mm512_reduce_max_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Special Math Functions</category>
+ <return type="double" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Reduce the packed double-precision (64-bit) floating-point elements in "a" by maximum. Returns the maximum of all elements in "a".</description>
+ <operation>
+dst[63:0] := Cast_FP64(0xFFEFFFFFFFFFFFFF)
+FOR j := 0 to 7
+ i := j*64
+ dst[63:0] := (dst[63:0] &gt; a[i+63:i] ? dst[63:0] : a[i+63:i])
+ENDFOR
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" sequence="TRUE" name="_mm512_reduce_max_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Special Math Functions</category>
+ <return type="float" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Reduce the packed single-precision (32-bit) floating-point elements in "a" by maximum. Returns the maximum of all elements in "a".</description>
+ <operation>
+dst[31:0] := Cast_FP32(0xFF7FFFFF)
+FOR j := 0 to 15
+ i := j*32
+ dst[31:0] := (dst[31:0] &gt; a[i+31:i] ? dst[31:0] : a[i+31:i])
+ENDFOR
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" sequence="TRUE" name="_mm512_reduce_min_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Special Math Functions</category>
+ <return type="int" varname="dst" etype="SI32"/>
+ <parameter type="__m512i" varname="a" etype="SI32"/>
+ <description>Reduce the packed signed 32-bit integers in "a" by minimum. Returns the minimum of all elements in "a".</description>
+ <operation>
+dst[31:0] := Int32(0x7FFFFFFF)
+FOR j := 0 to 15
+ i := j*32
+ dst[31:0] := (dst[31:0] &lt; a[i+31:i] ? dst[31:0] : a[i+31:i])
+ENDFOR
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" sequence="TRUE" name="_mm512_reduce_min_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__int64" varname="dst" etype="SI64"/>
+ <parameter type="__m512i" varname="a" etype="SI64"/>
+ <description>Reduce the packed signed 64-bit integers in "a" by minimum. Returns the minimum of all elements in "a".</description>
+ <operation>
+dst[63:0] := Int64(0x7FFFFFFFFFFFFFFF)
+FOR j := 0 to 7
+ i := j*64
+ dst[63:0] := (dst[63:0] &lt; a[i+63:i] ? dst[63:0] : a[i+63:i])
+ENDFOR
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" sequence="TRUE" name="_mm512_reduce_min_epu32">
+ <type>Integer</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Special Math Functions</category>
+ <return type="unsigned int" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <description>Reduce the packed unsigned 32-bit integers in "a" by minimum. Returns the minimum of all elements in "a".</description>
+ <operation>
+dst[31:0] := 0xFFFFFFFF
+FOR j := 0 to 15
+ i := j*32
+ dst[31:0] := (dst[31:0] &lt; a[i+31:i] ? dst[31:0] : a[i+31:i])
+ENDFOR
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" sequence="TRUE" name="_mm512_reduce_min_epu64">
+ <type>Integer</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Special Math Functions</category>
+ <return type="unsigned __int64" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <description>Reduce the packed unsigned 64-bit integers in "a" by minimum. Returns the minimum of all elements in "a".</description>
+ <operation>
+dst[63:0] := 0xFFFFFFFFFFFFFFFF
+FOR j := 0 to 7
+ i := j*64
+ dst[63:0] := (dst[63:0] &lt; a[i+63:i] ? dst[63:0] : a[i+63:i])
+ENDFOR
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" sequence="TRUE" name="_mm512_reduce_min_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Special Math Functions</category>
+ <return type="double" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Reduce the packed double-precision (64-bit) floating-point elements in "a" by minimum. Returns the minimum of all elements in "a".</description>
+ <operation>
+dst[63:0] := Cast_FP64(0x7FEFFFFFFFFFFFFF)
+FOR j := 0 to 7
+ i := j*64
+ dst[63:0] := (dst[63:0] &lt; a[i+63:i] ? dst[63:0] : a[i+63:i])
+ENDFOR
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" sequence="TRUE" name="_mm512_reduce_min_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Special Math Functions</category>
+ <return type="float" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Reduce the packed single-precision (32-bit) floating-point elements in "a" by minimum. Returns the minimum of all elements in "a".</description>
+ <operation>
+dst[31:0] := Cast_FP32(0x7F7FFFFF)
+FOR j := 0 to 15
+ i := j*32
+ dst[31:0] := (dst[31:0] &lt; a[i+31:i] ? dst[31:0] : a[i+31:i])
+ENDFOR
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" sequence="TRUE" name="_mm512_reduce_mul_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="int" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <description>Reduce the packed 32-bit integers in "a" by multiplication. Returns the product of all elements in "a".</description>
+ <operation>
+dst[31:0] := 1
+FOR j := 0 to 15
+ i := j*32
+ dst[31:0] := dst[31:0] * a[i+31:i]
+ENDFOR
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" sequence="TRUE" name="_mm512_reduce_mul_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__int64" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <description>Reduce the packed 64-bit integers in "a" by multiplication. Returns the product of all elements in "a".</description>
+ <operation>
+dst[63:0] := 1
+FOR j := 0 to 7
+ i := j*64
+ dst[63:0] := dst[63:0] * a[i+63:i]
+ENDFOR
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" sequence="TRUE" name="_mm512_reduce_mul_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="double" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Reduce the packed double-precision (64-bit) floating-point elements in "a" by multiplication. Returns the product of all elements in "a".</description>
+ <operation>
+dst[63:0] := 1.0
+FOR j := 0 to 7
+ i := j*64
+ dst[63:0] := dst[63:0] * a[i+63:i]
+ENDFOR
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" sequence="TRUE" name="_mm512_reduce_mul_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="float" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Reduce the packed single-precision (32-bit) floating-point elements in "a" by multiplication. Returns the product of all elements in "a".</description>
+ <operation>
+dst[31:0] := FP32(1.0)
+FOR j := 0 to 15
+ i := j*32
+ dst[31:0] := dst[31:0] * a[i+31:i]
+ENDFOR
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" sequence="TRUE" name="_mm512_reduce_or_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Logical</category>
+ <return type="int" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <description>Reduce the packed 32-bit integers in "a" by bitwise OR. Returns the bitwise OR of all elements in "a".</description>
+ <operation>
+dst[31:0] := 0
+FOR j := 0 to 15
+ i := j*32
+ dst[31:0] := dst[31:0] OR a[i+31:i]
+ENDFOR
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" sequence="TRUE" name="_mm512_reduce_or_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Logical</category>
+ <return type="__int64" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <description>Reduce the packed 64-bit integers in "a" by bitwise OR. Returns the bitwise OR of all elements in "a".</description>
+ <operation>
+dst[63:0] := 0
+FOR j := 0 to 7
+ i := j*64
+ dst[63:0] := dst[63:0] OR a[i+63:i]
+ENDFOR
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_and_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Logical</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="src" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="v2" etype="UI32"/>
+ <parameter type="__m512i" varname="v3" etype="UI32"/>
+ <description>Performs element-by-element bitwise AND between packed 32-bit integer elements of "v2" and "v3", storing the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := v2[i+31:i] &amp; v3[i+31:i]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPANDD" form="zmm {k}, zmm, zmm" xed="VPANDD_ZMMu32_MASKmskw_ZMMu32_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_cvtpslo_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Convert</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512" varname="v2" etype="FP32"/>
+ <description>Performs element-by-element conversion of the lower half of packed single-precision (32-bit) floating-point elements in "v2" to packed double-precision (64-bit) floating-point elements, storing the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ n := j*64
+ dst[n+63:n] := Convert_FP32_To_FP64(v2[i+31:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCVTPS2PD" form="zmm, zmm"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_cvtpslo_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Convert</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="v2" etype="FP32"/>
+ <description>Performs element-by-element conversion of the lower half of packed single-precision (32-bit) floating-point elements in "v2" to packed double-precision (64-bit) floating-point elements, storing the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ l := j*64
+ IF k[j]
+ dst[l+63:l] := Convert_FP32_To_FP64(v2[i+31:i])
+ ELSE
+ dst[l+63:l] := src[l+63:l]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCVTPS2PD" form="zmm {k}, zmm"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_cvtepi32lo_pd">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Convert</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512i" varname="v2" etype="SI32"/>
+ <description>Performs element-by-element conversion of the lower half of packed 32-bit integer elements in "v2" to packed double-precision (64-bit) floating-point elements, storing the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ l := j*64
+ dst[l+63:l] := Convert_Int32_To_FP64(v2[i+31:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCVTDQ2PD" form="zmm, zmm"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_cvtepi32lo_pd">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Convert</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="v2" etype="SI32"/>
+ <description>Performs element-by-element conversion of the lower half of packed 32-bit integer elements in "v2" to packed double-precision (64-bit) floating-point elements, storing the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ n := j*64
+ IF k[j]
+ dst[n+63:n] := Convert_Int32_To_FP64(v2[i+31:i])
+ ELSE
+ dst[n+63:n] := src[n+63:n]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCVTDQ2PD" form="zmm {k}, zmm"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_cvtepu32lo_pd">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Convert</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512i" varname="v2" etype="UI32"/>
+ <description>Performs element-by-element conversion of the lower half of packed 32-bit unsigned integer elements in "v2" to packed double-precision (64-bit) floating-point elements, storing the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ n := j*64
+ dst[n+63:n] := Convert_Int32_To_FP64(v2[i+31:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCVTUDQ2PD" form="zmm, zmm"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_cvtepu32lo_pd">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Convert</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="v2" etype="UI32"/>
+ <description>Performs element-by-element conversion of the lower half of 32-bit unsigned integer elements in "v2" to packed double-precision (64-bit) floating-point elements, storing the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ l := j*64
+ IF k[j]
+ dst[l+63:l] := Convert_Int32_To_FP64(v2[i+31:i])
+ ELSE
+ dst[l+63:l] := src[l+63:l]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCVTUDQ2PD" form="zmm {k}, zmm"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_i32extgather_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Load</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="vindex" etype="SI32"/>
+ <parameter type="void const *" varname="base_addr" etype="UI32"/>
+ <parameter type="_MM_UPCONV_EPI32_ENUM" varname="conv" etype="IMM" immtype="_MM_UPCONV_EPI32"/>
+ <parameter type="int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <parameter type="int" varname="hint" etype="IMM" hint="TRUE" immtype="_MM_HINT_EXT"/>
+ <description>Up-converts 16 memory locations starting at location "base_addr" at packed 32-bit integer indices stored in "vindex" scaled by "scale" using "conv" to 32-bit integer elements and stores them in "dst". AVX512 only supports _MM_UPCONV_EPI32_NONE.</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ m := j*32
+ addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
+
+ CASE conv OF
+ _MM_UPCONV_EPI32_NONE: dst[i+31:i] := MEM[addr+31:addr]
+ _MM_UPCONV_EPI32_UINT8: dst[i+31:i] := ZeroExtend32(MEM[addr+7:addr])
+ _MM_UPCONV_EPI32_SINT8: dst[i+31:i] := SignExtend32(MEM[addr+7:addr])
+ _MM_UPCONV_EPI32_UINT16: dst[i+31:i] := ZeroExtend32(MEM[addr+15:addr])
+ _MM_UPCONV_EPI32_SINT16: dst[i+31:i] := SignExtend32(MEM[addr+15:addr])
+ ESAC
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPGATHERDD" form="zmm, m512" xed="VPGATHERDD_ZMMu32_MASKmskw_MEMu32_AVX512_VL512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_i32extgather_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Load</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="src" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="vindex" etype="SI32"/>
+ <parameter type="void const *" varname="base_addr" etype="UI32"/>
+ <parameter type="_MM_UPCONV_EPI32_ENUM" varname="conv" etype="IMM" immtype="_MM_UPCONV_EPI32"/>
+ <parameter type="int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <parameter type="int" varname="hint" etype="IMM" hint="TRUE" immtype="_MM_HINT_EXT"/>
+ <description>Up-converts 16 single-precision (32-bit) memory locations starting at location "base_addr" at packed 32-bit integer indices stored in "vindex" scaled by "scale" using "conv" to 32-bit integer elements and stores them in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). AVX512 only supports _MM_UPCONV_EPI32_NONE.</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ m := j*32
+ addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
+
+ IF k[j]
+ CASE conv OF
+ _MM_UPCONV_EPI32_NONE: dst[i+31:i] := MEM[addr+31:addr]
+ _MM_UPCONV_EPI32_UINT8: dst[i+31:i] := ZeroExtend32(MEM[addr+7:addr])
+ _MM_UPCONV_EPI32_SINT8: dst[i+31:i] := SignExtend32(MEM[addr+7:addr])
+ _MM_UPCONV_EPI32_UINT16: dst[i+31:i] := ZeroExtend32(MEM[addr+15:addr])
+ _MM_UPCONV_EPI32_SINT16: dst[i+31:i] := SignExtend32(MEM[addr+15:addr])
+ ESAC
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPGATHERDD" form="zmm {k}, m512" xed="VPGATHERDD_ZMMu32_MASKmskw_MEMu32_AVX512_VL512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_i32loextgather_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Load</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="vindex" etype="SI64"/>
+ <parameter type="void const *" varname="base_addr" etype="UI64"/>
+ <parameter type="_MM_UPCONV_EPI64_ENUM" varname="conv" etype="IMM" immtype="_MM_UPCONV_EPI64"/>
+ <parameter type="int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <parameter type="int" varname="hint" etype="IMM" hint="TRUE" immtype="_MM_HINT_EXT"/>
+ <description>Up-converts 8 double-precision (64-bit) memory locations starting at location "base_addr" at packed 32-bit integer indices stored in the lower half of "vindex" scaled by "scale" using "conv" to 64-bit integer elements and stores them in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ m := j*32
+ addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
+
+ CASE conv OF
+ _MM_UPCONV_EPI64_NONE: dst[i+63:i] := MEM[addr+63:addr]
+ ESAC
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPGATHERDQ" form="zmm, m512" xed="VPGATHERDQ_ZMMu64_MASKmskw_MEMu64_AVX512_VL512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_i32loextgather_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Load</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="vindex" etype="SI64"/>
+ <parameter type="void const *" varname="base_addr" etype="UI64"/>
+ <parameter type="_MM_UPCONV_EPI64_ENUM" varname="conv" etype="IMM" immtype="_MM_UPCONV_EPI64"/>
+ <parameter type="int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <parameter type="int" varname="hint" etype="IMM" hint="TRUE" immtype="_MM_HINT_EXT"/>
+ <description>Up-converts 8 double-precision (64-bit) memory locations starting at location "base_addr" at packed 32-bit integer indices stored in the lower half of "vindex" scaled by "scale" using "conv" to 64-bit integer elements and stores them in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ m := j*32
+ addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
+
+ IF k[j]
+ CASE conv OF
+ _MM_UPCONV_EPI64_NONE: dst[i+63:i] := MEM[addr+63:addr]
+ ESAC
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPGATHERDQ" form="zmm {k}, m512" xed="VPGATHERDQ_ZMMu64_MASKmskw_MEMu64_AVX512_VL512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_i32extgather_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Load</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512i" varname="vindex" etype="SI32"/>
+ <parameter type="void const *" varname="base_addr" etype="FP32"/>
+ <parameter type="_MM_UPCONV_PS_ENUM" varname="conv" etype="IMM" immtype="_MM_UPCONV_PS"/>
+ <parameter type="int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <parameter type="int" varname="hint" etype="IMM" hint="TRUE" immtype="_MM_HINT_EXT"/>
+ <description>Up-converts 16 memory locations starting at location "base_addr" at packed 32-bit integer indices stored in "vindex" scaled by "scale" using "conv" to single-precision (32-bit) floating-point elements and stores them in "dst". AVX512 only supports _MM_UPCONV_PS_NONE.</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ m := j*32
+ addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
+
+ CASE conv OF
+ _MM_UPCONV_PS_NONE: dst[i+31:i] := MEM[addr+31:addr]
+ _MM_UPCONV_PS_FLOAT16: dst[i+31:i] := Convert_FP16_To_FP32(MEM[addr+15:addr])
+ _MM_UPCONV_PS_UINT8: dst[i+31:i] := Convert_UInt8_To_FP32(MEM[addr+7:addr])
+ _MM_UPCONV_PS_SINT8: dst[i+31:i] := Convert_Int8_To_FP32(MEM[addr+7:addr])
+ _MM_UPCONV_PS_UINT16: dst[i+31:i] := Convert_UInt16_To_FP32(MEM[addr+15:addr])
+ _MM_UPCONV_PS_SINT16: dst[i+31:i] := Convert_Int16_To_FP32(MEM[addr+15:addr])
+ ESAC
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VGATHERDPS" form="zmm, m512" xed="VGATHERDPS_ZMMf32_MASKmskw_MEMf32_AVX512_VL512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_i32extgather_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Load</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="vindex" etype="SI32"/>
+ <parameter type="void const *" varname="base_addr" etype="FP32"/>
+ <parameter type="_MM_UPCONV_PS_ENUM" varname="conv" etype="IMM" immtype="_MM_UPCONV_PS"/>
+ <parameter type="int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <parameter type="int" varname="hint" etype="IMM" hint="TRUE" immtype="_MM_HINT_EXT"/>
+ <description>Up-converts 16 single-precision (32-bit) memory locations starting at location "base_addr" at packed 32-bit integer indices stored in "vindex" scaled by "scale" using "conv" to single-precision (32-bit) floating-point elements and stores them in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). AVX512 only supports _MM_UPCONV_PS_NONE.</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ m := j*32
+ addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
+
+ IF k[j]
+ CASE conv OF
+ _MM_UPCONV_PS_NONE: dst[i+31:i] := MEM[addr+31:addr]
+ _MM_UPCONV_PS_FLOAT16: dst[i+31:i] := Convert_FP16_To_FP32(MEM[addr+15:addr])
+ _MM_UPCONV_PS_UINT8: dst[i+31:i] := Convert_UInt8_To_FP32(MEM[addr+7:addr])
+ _MM_UPCONV_PS_SINT8: dst[i+31:i] := Convert_Int8_To_FP32(MEM[addr+7:addr])
+ _MM_UPCONV_PS_UINT16: dst[i+31:i] := Convert_UInt16_To_FP32(MEM[addr+15:addr])
+ _MM_UPCONV_PS_SINT16: dst[i+31:i] := Convert_Int16_To_FP32(MEM[addr+15:addr])
+ ESAC
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VGATHERDPS" form="zmm {k}, m512" xed="VGATHERDPS_ZMMf32_MASKmskw_MEMf32_AVX512_VL512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_i32loextgather_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Load</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512i" varname="vindex" etype="SI64"/>
+ <parameter type="void const *" varname="base_addr" etype="FP64"/>
+ <parameter type="_MM_UPCONV_PD_ENUM" varname="conv" etype="IMM" immtype="_MM_UPCONV_PD"/>
+ <parameter type="int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <parameter type="int" varname="hint" etype="IMM" hint="TRUE" immtype="_MM_HINT_EXT"/>
+ <description>Up-converts 8 double-precision (64-bit) floating-point elements in memory locations starting at location "base_addr" at packed 32-bit integer indices stored in the lower half of "vindex" scaled by "scale" using "conv" to 64-bit floating-point elements and stores them in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ m := j*32
+ addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
+
+ CASE conv OF
+ _MM_UPCONV_PD_NONE: dst[i+63:i] := MEM[addr+63:addr]
+ ESAC
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VGATHERDPD" form="zmm, m512" xed="VGATHERDPD_ZMMf64_MASKmskw_MEMf64_AVX512_VL512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_i32loextgather_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Load</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="vindex" etype="SI64"/>
+ <parameter type="void const *" varname="base_addr" etype="FP64"/>
+ <parameter type="_MM_UPCONV_PD_ENUM" varname="conv" etype="IMM" immtype="_MM_UPCONV_PD"/>
+ <parameter type="int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <parameter type="int" varname="hint" etype="IMM" hint="TRUE" immtype="_MM_HINT_EXT"/>
+ <description>Up-converts 8 double-precision (64-bit) floating-point elements in memory locations starting at location "base_addr" at packed 32-bit integer indices stored in the lower half of "vindex" scaled by "scale" using "conv" to 64-bit floating-point elements and stores them in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ m := j*32
+ addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
+
+ IF k[j]
+ CASE conv OF
+ _MM_UPCONV_PD_NONE:
+ dst[i+63:i] := MEM[addr+63:addr]
+ ESAC
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VGATHERDPD" form="zmm {k}, m512" xed="VGATHERDPD_ZMMf64_MASKmskw_MEMf64_AVX512_VL512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_i32extscatter_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void *" varname="base_addr" etype="FP32"/>
+ <parameter type="__m512i" varname="vindex" etype="SI32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="_MM_DOWNCONV_PS_ENUM" varname="conv" etype="IMM" immtype="_MM_DOWNCONV_PS"/>
+ <parameter type="int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <parameter type="int" varname="hint" etype="IMM" hint="TRUE" immtype="_MM_HINT_EXT"/>
+ <description>Down-converts 16 packed single-precision (32-bit) floating-point elements in "a" and stores them in memory locations starting at location "base_addr" at packed 32-bit integer indices stored in "vindex" scaled by "scale" using "conv". AVX512 only supports _MM_DOWNCONV_PS_NONE.</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ m := j*32
+ addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
+
+ CASE conv OF
+ _MM_DOWNCONV_PS_NONE: MEM[addr+31:addr] := a[i+31:i]
+ _MM_DOWNCONV_PS_FLOAT16: MEM[addr+15:addr] := Convert_FP32_To_FP16(a[i+31:i])
+ _MM_DOWNCONV_PS_UINT8: MEM[addr+ 7:addr] := Convert_FP32_To_UInt8(a[i+31:i])
+ _MM_DOWNCONV_PS_SINT8: MEM[addr+ 7:addr] := Convert_FP32_To_Int8(a[i+31:i])
+ _MM_DOWNCONV_PS_UINT16: MEM[addr+15:addr] := Convert_FP32_To_UInt16(a[i+31:i])
+ _MM_DOWNCONV_PS_SINT16: MEM[addr+15:addr] := Convert_FP32_To_Int16(a[i+31:i])
+ ESAC
+ENDFOR
+ </operation>
+ <instruction name="VSCATTERDPS" form="m512, zmm" xed="VSCATTERDPS_MEMf32_MASKmskw_ZMMf32_AVX512_VL512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_i32extscatter_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void *" varname="base_addr" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="vindex" etype="SI32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="_MM_DOWNCONV_PS_ENUM" varname="conv" etype="IMM" immtype="_MM_DOWNCONV_PS"/>
+ <parameter type="int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <parameter type="int" varname="hint" etype="IMM" hint="TRUE" immtype="_MM_HINT_EXT"/>
+	<description>Down-converts 16 packed single-precision (32-bit) floating-point elements in "a" according to "conv" and stores them in memory locations starting at location "base_addr" at packed 32-bit integer indices stored in "vindex" scaled by "scale" using writemask "k" (elements are written only when the corresponding mask bit is set). AVX512 only supports _MM_DOWNCONV_PS_NONE.</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ m := j*32
+ addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
+
+ IF k[j]
+ CASE conv OF
+ _MM_DOWNCONV_PS_NONE: MEM[addr+31:addr] := a[i+31:i]
+ _MM_DOWNCONV_PS_FLOAT16: MEM[addr+15:addr] := Convert_FP32_To_FP16(a[i+31:i])
+ _MM_DOWNCONV_PS_UINT8: MEM[addr+ 7:addr] := Convert_FP32_To_UInt8(a[i+31:i])
+ _MM_DOWNCONV_PS_SINT8: MEM[addr+ 7:addr] := Convert_FP32_To_Int8(a[i+31:i])
+ _MM_DOWNCONV_PS_UINT16: MEM[addr+15:addr] := Convert_FP32_To_UInt16(a[i+31:i])
+ _MM_DOWNCONV_PS_SINT16: MEM[addr+15:addr] := Convert_FP32_To_Int16(a[i+31:i])
+ ESAC
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VSCATTERDPS" form="m512 {k}, zmm" xed="VSCATTERDPS_MEMf32_MASKmskw_ZMMf32_AVX512_VL512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_i32loextscatter_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void *" varname="base_addr" etype="FP64"/>
+ <parameter type="__m512i" varname="vindex" etype="SI64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="_MM_DOWNCONV_PD_ENUM" varname="conv" etype="IMM" immtype="_MM_DOWNCONV_PD"/>
+ <parameter type="int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <parameter type="int" varname="hint" etype="IMM" hint="TRUE" immtype="_MM_HINT_EXT"/>
+ <description>Down-converts 8 packed double-precision (64-bit) floating-point elements in "a" and stores them in memory locations starting at location "base_addr" at packed 32-bit integer indices stored in "vindex" scaled by "scale" using "conv".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ m := j*32
+ addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
+
+ CASE conv OF
+ _MM_DOWNCONV_PD_NONE: MEM[addr+63:addr] := a[i+63:i]
+ ESAC
+ENDFOR
+ </operation>
+ <instruction name="VSCATTERDPD" form="m512, zmm" xed="VSCATTERDPD_MEMf64_MASKmskw_ZMMf64_AVX512_VL512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_i32loextscatter_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void *" varname="base_addr" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="vindex" etype="SI64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="_MM_DOWNCONV_PD_ENUM" varname="conv" etype="IMM" immtype="_MM_DOWNCONV_PD"/>
+ <parameter type="int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <parameter type="int" varname="hint" etype="IMM" hint="TRUE" immtype="_MM_HINT_EXT"/>
+ <description>Down-converts 8 packed double-precision (64-bit) floating-point elements in "a" and stores them in memory locations starting at location "base_addr" at packed 32-bit integer indices stored in "vindex" scaled by "scale" using "conv". Only those elements whose corresponding mask bit is set in writemask "k" are written to memory.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ m := j*32
+ addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
+
+ IF k[j]
+ CASE conv OF
+ _MM_DOWNCONV_PD_NONE: MEM[addr+63:addr] := a[i+63:i]
+ ESAC
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VSCATTERDPD" form="m512 {k}, zmm" xed="VSCATTERDPD_MEMf64_MASKmskw_ZMMf64_AVX512_VL512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_i32loextscatter_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void *" varname="base_addr" etype="UI64"/>
+ <parameter type="__m512i" varname="vindex" etype="SI64"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="_MM_DOWNCONV_EPI64_ENUM" varname="conv" etype="IMM" immtype="_MM_DOWNCONV_EPI64"/>
+ <parameter type="int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <parameter type="int" varname="hint" etype="IMM" hint="TRUE" immtype="_MM_HINT_EXT"/>
+ <description>Down-converts 8 packed 64-bit integer elements in "a" and stores them in memory locations starting at location "base_addr" at packed 32-bit integer indices stored in "vindex" scaled by "scale" using "conv".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ m := j*32
+ addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
+
+ CASE conv OF
+ _MM_DOWNCONV_EPI64_NONE: MEM[addr+63:addr] := a[i+63:i]
+ ESAC
+ENDFOR
+ </operation>
+ <instruction name="VPSCATTERDQ" form="m512, zmm" xed="VPSCATTERDQ_MEMu64_MASKmskw_ZMMu64_AVX512_VL512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_i32loextscatter_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void *" varname="base_addr" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="vindex" etype="SI64"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="_MM_DOWNCONV_EPI64_ENUM" varname="conv" etype="IMM" immtype="_MM_DOWNCONV_EPI64"/>
+ <parameter type="int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <parameter type="int" varname="hint" etype="IMM" hint="TRUE" immtype="_MM_HINT_EXT"/>
+ <description>Down-converts 8 packed 64-bit integer elements in "a" and stores them in memory locations starting at location "base_addr" at packed 32-bit integer indices stored in "vindex" scaled by "scale" using "conv". Only those elements whose corresponding mask bit is set in writemask "k" are written to memory.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ m := j*32
+ addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
+
+ IF k[j]
+ CASE conv OF
+ _MM_DOWNCONV_EPI64_NONE: MEM[addr+63:addr] := a[i+63:i]
+ ESAC
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VPSCATTERDQ" form="m512 {k}, zmm" xed="VPSCATTERDQ_MEMu64_MASKmskw_ZMMu64_AVX512_VL512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_cvtpd_pslo">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Convert</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512d" varname="v2" etype="FP64"/>
+ <description>Performs an element-by-element conversion of packed double-precision (64-bit) floating-point elements in "v2" to single-precision (32-bit) floating-point elements and stores them in "dst". The elements are stored in the lower half of the results vector, while the remaining upper half locations are set to 0.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ k := j*32
+ dst[k+31:k] := Convert_FP64_To_FP32(v2[i+63:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCVTPD2PS" form="zmm, zmm"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_cvtpd_pslo">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Convert</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="v2" etype="FP64"/>
+ <description>Performs an element-by-element conversion of packed double-precision (64-bit) floating-point elements in "v2" to single-precision (32-bit) floating-point elements and stores them in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). The elements are stored in the lower half of the results vector, while the remaining upper half locations are set to 0.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ l := j*32
+ IF k[j]
+ dst[l+31:l] := Convert_FP64_To_FP32(v2[i+63:i])
+ ELSE
+ dst[l+31:l] := src[l+31:l]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCVTPD2PS" form="zmm {k}, zmm"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_i32logather_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Load</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="vindex" etype="SI32"/>
+ <parameter type="void const*" varname="base_addr" etype="UI64"/>
+ <parameter type="int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <description>Loads 8 64-bit integer elements from memory starting at location "base_addr" at packed 32-bit integer indices stored in the lower half of "vindex" scaled by "scale" and stores them in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ m := j*32
+ addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
+ dst[i+63:i] := MEM[addr+63:addr]
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPGATHERDQ" form="zmm, m512" xed="VPGATHERDQ_ZMMu64_MASKmskw_MEMu64_AVX512_VL512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_i32logather_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Load</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="vindex" etype="SI64"/>
+ <parameter type="void const*" varname="base_addr" etype="UI64"/>
+ <parameter type="int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <description>Loads 8 64-bit integer elements from memory starting at location "base_addr" at packed 32-bit integer indices stored in the lower half of "vindex" scaled by "scale" and stores them in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ m := j*32
+ IF k[j]
+ addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
+ dst[i+63:i] := MEM[addr+63:addr]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPGATHERDQ" form="zmm {k}, m512" xed="VPGATHERDQ_ZMMu64_MASKmskw_MEMu64_AVX512_VL512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_i32logather_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Load</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512i" varname="vindex" etype="SI64"/>
+ <parameter type="void const*" varname="base_addr" etype="FP64"/>
+ <parameter type="int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+	<description>Loads 8 double-precision (64-bit) floating-point elements stored at memory locations starting at location "base_addr" at packed 32-bit integer indices stored in the lower half of "vindex" scaled by "scale" and stores them in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ m := j*32
+ addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
+ dst[i+63:i] := MEM[addr+63:addr]
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VGATHERDPD" form="zmm, m512" xed="VGATHERDPD_ZMMf64_MASKmskw_MEMf64_AVX512_VL512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_i32logather_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Load</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="vindex" etype="SI64"/>
+ <parameter type="void const*" varname="base_addr" etype="FP64"/>
+ <parameter type="int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <description>Loads 8 double-precision (64-bit) floating-point elements from memory starting at location "base_addr" at packed 32-bit integer indices stored in the lower half of "vindex" scaled by "scale" into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ m := j*32
+ IF k[j]
+ addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
+ dst[i+63:i] := MEM[addr+63:addr]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VGATHERDPD" form="zmm {k}, m512" xed="VGATHERDPD_ZMMf64_MASKmskw_MEMf64_AVX512_VL512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_i32loscatter_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="base_addr" etype="FP64"/>
+ <parameter type="__m512i" varname="vindex" etype="SI32"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <description>Stores 8 packed double-precision (64-bit) floating-point elements in "a" to memory locations starting at location "base_addr" at packed 32-bit integer indices stored in "vindex" scaled by "scale".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ m := j*32
+ addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
+ MEM[addr+63:addr] := a[i+63:i]
+ENDFOR
+ </operation>
+ <instruction name="VSCATTERDPD" form="m512, zmm" xed="VSCATTERDPD_MEMf64_MASKmskw_ZMMf64_AVX512_VL512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_i32loscatter_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="base_addr" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="vindex" etype="SI32"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <description>Stores 8 packed double-precision (64-bit) floating-point elements in "a" to memory locations starting at location "base_addr" at packed 32-bit integer indices stored in "vindex" scaled by "scale". Only those elements whose corresponding mask bit is set in writemask "k" are written to memory.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ m := j*32
+ IF k[j]
+ addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
+ MEM[addr+63:addr] := a[i+63:i]
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VSCATTERDPD" form="m512 {k}, zmm" xed="VSCATTERDPD_MEMf64_MASKmskw_ZMMf64_AVX512_VL512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_abs_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="v2" etype="FP32"/>
+ <description>Finds the absolute value of each packed single-precision (32-bit) floating-point element in "v2", storing the results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := ABS(v2[i+31:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPANDD" form="zmm, zmm, m512" xed="VPANDD_ZMMu32_MASKmskw_ZMMu32_MEMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_abs_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="v2" etype="FP32"/>
+ <description>Finds the absolute value of each packed single-precision (32-bit) floating-point element in "v2", storing the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := ABS(v2[i+31:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPANDD" form="zmm {k}, zmm, m512" xed="VPANDD_ZMMu32_MASKmskw_ZMMu32_MEMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_abs_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="v2" etype="FP64"/>
+ <description>Finds the absolute value of each packed double-precision (64-bit) floating-point element in "v2", storing the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ dst[i+63:i] := ABS(v2[i+63:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPANDQ" form="zmm, zmm, m512" xed="VPANDQ_ZMMu64_MASKmskw_ZMMu64_MEMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_abs_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="v2" etype="FP64"/>
+ <description>Finds the absolute value of each packed double-precision (64-bit) floating-point element in "v2", storing the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := ABS(v2[i+63:i])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPANDQ" form="zmm {k}, zmm, m512" xed="VPANDQ_ZMMu64_MASKmskw_ZMMu64_MEMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML/KNC" name="_mm512_log2_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Compute the base-2 logarithm of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := LOG(a[i+31:i]) / LOG(2.0)
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VLOG2PS" form="zmm, zmm"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML/KNC" name="_mm512_mask_log2_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Compute the base-2 logarithm of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := LOG(a[i+31:i]) / LOG(2.0)
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VLOG2PS" form="zmm {k}, zmm"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_i32extscatter_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void *" varname="base_addr" etype="UI32"/>
+ <parameter type="__m512i" varname="vindex" etype="SI32"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="_MM_DOWNCONV_EPI32_ENUM" varname="conv" etype="IMM" immtype="_MM_DOWNCONV_EPI32"/>
+ <parameter type="int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <parameter type="int" varname="hint" etype="IMM" hint="TRUE" immtype="_MM_HINT_EXT"/>
+ <description>Down-converts 16 packed 32-bit integer elements in "a" using "conv" and stores them in memory locations starting at location "base_addr" at packed 32-bit integer indices stored in "vindex" scaled by "scale". "hint" indicates to the processor whether the data is non-temporal. AVX512 only supports _MM_DOWNCONV_EPI32_NONE.</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ m := j*32
+ addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
+
+ CASE conv OF
+ _MM_DOWNCONV_EPI32_NONE: MEM[addr+31:addr] := a[i+31:i]
+ _MM_DOWNCONV_EPI32_UINT8: MEM[addr+ 7:addr] := Truncate8(a[i+31:i])
+ _MM_DOWNCONV_EPI32_SINT8: MEM[addr+ 7:addr] := Saturate8(a[i+31:i])
+ _MM_DOWNCONV_EPI32_UINT16: MEM[addr+15:addr] := Truncate16(a[i+31:i])
+    _MM_DOWNCONV_EPI32_SINT16: MEM[addr+15:addr] := Saturate16(a[i+31:i])
+ ESAC
+ENDFOR
+ </operation>
+ <instruction name="VPSCATTERDD" form="m512, zmm" xed="VPSCATTERDD_MEMu32_MASKmskw_ZMMu32_AVX512_VL512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_i32extscatter_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512F/KNCNI</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void *" varname="base_addr" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="vindex" etype="SI32"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="_MM_DOWNCONV_EPI32_ENUM" varname="conv" etype="IMM" immtype="_MM_DOWNCONV_EPI32"/>
+ <parameter type="int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <parameter type="int" varname="hint" etype="IMM" hint="TRUE" immtype="_MM_HINT_EXT"/>
+ <description>Down-converts 16 packed 32-bit integer elements in "a" using "conv" and stores them in memory locations starting at location "base_addr" at packed 32-bit integer indices stored in "vindex" scaled by "scale". Elements are written using writemask "k" (elements are only written when the corresponding mask bit is set; otherwise, elements are left unchanged in memory). "hint" indicates to the processor whether the data is non-temporal. AVX512 only supports _MM_DOWNCONV_EPI32_NONE.</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ m := j*32
+ addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
+
+ IF k[j]
+ CASE conv OF
+ _MM_DOWNCONV_EPI32_NONE: MEM[addr+31:addr] := a[i+31:i]
+ _MM_DOWNCONV_EPI32_UINT8: MEM[addr+ 7:addr] := Truncate8(a[i+31:i])
+ _MM_DOWNCONV_EPI32_SINT8: MEM[addr+ 7:addr] := Saturate8(a[i+31:i])
+ _MM_DOWNCONV_EPI32_UINT16: MEM[addr+15:addr] := Truncate16(a[i+31:i])
+      _MM_DOWNCONV_EPI32_SINT16: MEM[addr+15:addr] := Saturate16(a[i+31:i])
+ ESAC
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VPSCATTERDD" form="m512 {k}, zmm" xed="VPSCATTERDD_MEMu32_MASKmskw_ZMMu32_AVX512_VL512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_madd52lo_epu64">
+ <CPUID>AVX512IFMA52</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="__m512i" varname="b" etype="UI64"/>
+ <parameter type="__m512i" varname="c" etype="UI64"/>
+ <description>Multiply packed unsigned 52-bit integers in each 64-bit element of "b" and "c" to form a 104-bit intermediate result. Add the low 52-bit unsigned integer from the intermediate result with the corresponding unsigned 64-bit integer in "a", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ tmp[127:0] := ZeroExtend64(b[i+51:i]) * ZeroExtend64(c[i+51:i])
+ dst[i+63:i] := a[i+63:i] + ZeroExtend64(tmp[51:0])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMADD52LUQ" form="zmm, zmm, zmm" xed="VPMADD52LUQ_ZMMu64_MASKmskw_ZMMu64_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_madd52lo_epu64">
+ <CPUID>AVX512IFMA52</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="b" etype="UI64"/>
+ <parameter type="__m512i" varname="c" etype="UI64"/>
+ <description>Multiply packed unsigned 52-bit integers in each 64-bit element of "b" and "c" to form a 104-bit intermediate result. Add the low 52-bit unsigned integer from the intermediate result with the corresponding unsigned 64-bit integer in "a", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ tmp[127:0] := ZeroExtend64(b[i+51:i]) * ZeroExtend64(c[i+51:i])
+ dst[i+63:i] := a[i+63:i] + ZeroExtend64(tmp[51:0])
+ ELSE
+ dst[i+63:i] := a[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMADD52LUQ" form="zmm {k}, zmm, zmm" xed="VPMADD52LUQ_ZMMu64_MASKmskw_ZMMu64_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_madd52lo_epu64">
+ <CPUID>AVX512IFMA52</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="__m512i" varname="b" etype="UI64"/>
+ <parameter type="__m512i" varname="c" etype="UI64"/>
+ <description>Multiply packed unsigned 52-bit integers in each 64-bit element of "b" and "c" to form a 104-bit intermediate result. Add the low 52-bit unsigned integer from the intermediate result with the corresponding unsigned 64-bit integer in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ tmp[127:0] := ZeroExtend64(b[i+51:i]) * ZeroExtend64(c[i+51:i])
+ dst[i+63:i] := a[i+63:i] + ZeroExtend64(tmp[51:0])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMADD52LUQ" form="zmm {z}, zmm, zmm" xed="VPMADD52LUQ_ZMMu64_MASKmskw_ZMMu64_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_madd52lo_epu64">
+ <CPUID>AVX512IFMA52</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="__m256i" varname="b" etype="UI64"/>
+ <parameter type="__m256i" varname="c" etype="UI64"/>
+ <description>Multiply packed unsigned 52-bit integers in each 64-bit element of "b" and "c" to form a 104-bit intermediate result. Add the low 52-bit unsigned integer from the intermediate result with the corresponding unsigned 64-bit integer in "a", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ tmp[127:0] := ZeroExtend64(b[i+51:i]) * ZeroExtend64(c[i+51:i])
+ dst[i+63:i] := a[i+63:i] + ZeroExtend64(tmp[51:0])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMADD52LUQ" form="ymm, ymm, ymm" xed="VPMADD52LUQ_YMMu64_MASKmskw_YMMu64_YMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_madd52lo_epu64">
+ <CPUID>AVX512IFMA52</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="b" etype="UI64"/>
+ <parameter type="__m256i" varname="c" etype="UI64"/>
+ <description>Multiply packed unsigned 52-bit integers in each 64-bit element of "b" and "c" to form a 104-bit intermediate result. Add the low 52-bit unsigned integer from the intermediate result with the corresponding unsigned 64-bit integer in "a", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ tmp[127:0] := ZeroExtend64(b[i+51:i]) * ZeroExtend64(c[i+51:i])
+ dst[i+63:i] := a[i+63:i] + ZeroExtend64(tmp[51:0])
+ ELSE
+ dst[i+63:i] := a[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMADD52LUQ" form="ymm {k}, ymm, ymm" xed="VPMADD52LUQ_YMMu64_MASKmskw_YMMu64_YMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_madd52lo_epu64">
+ <CPUID>AVX512IFMA52</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="__m256i" varname="b" etype="UI64"/>
+ <parameter type="__m256i" varname="c" etype="UI64"/>
+ <description>Multiply packed unsigned 52-bit integers in each 64-bit element of "b" and "c" to form a 104-bit intermediate result. Add the low 52-bit unsigned integer from the intermediate result with the corresponding unsigned 64-bit integer in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ tmp[127:0] := ZeroExtend64(b[i+51:i]) * ZeroExtend64(c[i+51:i])
+ dst[i+63:i] := a[i+63:i] + ZeroExtend64(tmp[51:0])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMADD52LUQ" form="ymm {z}, ymm, ymm" xed="VPMADD52LUQ_YMMu64_MASKmskw_YMMu64_YMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_madd52lo_epu64">
+ <CPUID>AVX512IFMA52</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <parameter type="__m128i" varname="b" etype="UI64"/>
+ <parameter type="__m128i" varname="c" etype="UI64"/>
+ <description>Multiply packed unsigned 52-bit integers in each 64-bit element of "b" and "c" to form a 104-bit intermediate result. Add the low 52-bit unsigned integer from the intermediate result with the corresponding unsigned 64-bit integer in "a", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ tmp[127:0] := ZeroExtend64(b[i+51:i]) * ZeroExtend64(c[i+51:i])
+ dst[i+63:i] := a[i+63:i] + ZeroExtend64(tmp[51:0])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMADD52LUQ" form="xmm, xmm, xmm" xed="VPMADD52LUQ_XMMu64_MASKmskw_XMMu64_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_madd52lo_epu64">
+ <CPUID>AVX512IFMA52</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="b" etype="UI64"/>
+ <parameter type="__m128i" varname="c" etype="UI64"/>
+ <description>Multiply packed unsigned 52-bit integers in each 64-bit element of "b" and "c" to form a 104-bit intermediate result. Add the low 52-bit unsigned integer from the intermediate result with the corresponding unsigned 64-bit integer in "a", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ tmp[127:0] := ZeroExtend64(b[i+51:i]) * ZeroExtend64(c[i+51:i])
+ dst[i+63:i] := a[i+63:i] + ZeroExtend64(tmp[51:0])
+ ELSE
+ dst[i+63:i] := a[i+63:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMADD52LUQ" form="xmm {k}, xmm, xmm" xed="VPMADD52LUQ_XMMu64_MASKmskw_XMMu64_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_madd52lo_epu64">
+ <CPUID>AVX512IFMA52</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <parameter type="__m128i" varname="b" etype="UI64"/>
+ <parameter type="__m128i" varname="c" etype="UI64"/>
+ <description>Multiply packed unsigned 52-bit integers in each 64-bit element of "b" and "c" to form a 104-bit intermediate result. Add the low 52-bit unsigned integer from the intermediate result with the corresponding unsigned 64-bit integer in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ tmp[127:0] := ZeroExtend64(b[i+51:i]) * ZeroExtend64(c[i+51:i])
+ dst[i+63:i] := a[i+63:i] + ZeroExtend64(tmp[51:0])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMADD52LUQ" form="xmm {z}, xmm, xmm" xed="VPMADD52LUQ_XMMu64_MASKmskw_XMMu64_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_madd52hi_epu64">
+ <CPUID>AVX512IFMA52</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="__m512i" varname="b" etype="UI64"/>
+ <parameter type="__m512i" varname="c" etype="UI64"/>
+ <description>Multiply packed unsigned 52-bit integers in each 64-bit element of "b" and "c" to form a 104-bit intermediate result. Add the high 52-bit unsigned integer from the intermediate result with the corresponding unsigned 64-bit integer in "a", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ tmp[127:0] := ZeroExtend64(b[i+51:i]) * ZeroExtend64(c[i+51:i])
+ dst[i+63:i] := a[i+63:i] + ZeroExtend64(tmp[103:52])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMADD52HUQ" form="zmm, zmm, zmm" xed="VPMADD52HUQ_ZMMu64_MASKmskw_ZMMu64_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_madd52hi_epu64">
+ <CPUID>AVX512IFMA52</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="b" etype="UI64"/>
+ <parameter type="__m512i" varname="c" etype="UI64"/>
+ <description>Multiply packed unsigned 52-bit integers in each 64-bit element of "b" and "c" to form a 104-bit intermediate result. Add the high 52-bit unsigned integer from the intermediate result with the corresponding unsigned 64-bit integer in "a", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ tmp[127:0] := ZeroExtend64(b[i+51:i]) * ZeroExtend64(c[i+51:i])
+ dst[i+63:i] := a[i+63:i] + ZeroExtend64(tmp[103:52])
+ ELSE
+ dst[i+63:i] := a[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMADD52HUQ" form="zmm {k}, zmm, zmm" xed="VPMADD52HUQ_ZMMu64_MASKmskw_ZMMu64_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_madd52hi_epu64">
+ <CPUID>AVX512IFMA52</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="__m512i" varname="b" etype="UI64"/>
+ <parameter type="__m512i" varname="c" etype="UI64"/>
+ <description>Multiply packed unsigned 52-bit integers in each 64-bit element of "b" and "c" to form a 104-bit intermediate result. Add the high 52-bit unsigned integer from the intermediate result with the corresponding unsigned 64-bit integer in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ tmp[127:0] := ZeroExtend64(b[i+51:i]) * ZeroExtend64(c[i+51:i])
+ dst[i+63:i] := a[i+63:i] + ZeroExtend64(tmp[103:52])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMADD52HUQ" form="zmm {z}, zmm, zmm" xed="VPMADD52HUQ_ZMMu64_MASKmskw_ZMMu64_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_madd52hi_epu64">
+ <CPUID>AVX512IFMA52</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="__m256i" varname="b" etype="UI64"/>
+ <parameter type="__m256i" varname="c" etype="UI64"/>
+ <description>Multiply packed unsigned 52-bit integers in each 64-bit element of "b" and "c" to form a 104-bit intermediate result. Add the high 52-bit unsigned integer from the intermediate result with the corresponding unsigned 64-bit integer in "a", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ tmp[127:0] := ZeroExtend64(b[i+51:i]) * ZeroExtend64(c[i+51:i])
+ dst[i+63:i] := a[i+63:i] + ZeroExtend64(tmp[103:52])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMADD52HUQ" form="ymm, ymm, ymm" xed="VPMADD52HUQ_YMMu64_MASKmskw_YMMu64_YMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_madd52hi_epu64">
+ <CPUID>AVX512IFMA52</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="b" etype="UI64"/>
+ <parameter type="__m256i" varname="c" etype="UI64"/>
+ <description>Multiply packed unsigned 52-bit integers in each 64-bit element of "b" and "c" to form a 104-bit intermediate result. Add the high 52-bit unsigned integer from the intermediate result with the corresponding unsigned 64-bit integer in "a", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ tmp[127:0] := ZeroExtend64(b[i+51:i]) * ZeroExtend64(c[i+51:i])
+ dst[i+63:i] := a[i+63:i] + ZeroExtend64(tmp[103:52])
+ ELSE
+ dst[i+63:i] := a[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMADD52HUQ" form="ymm {k}, ymm, ymm" xed="VPMADD52HUQ_YMMu64_MASKmskw_YMMu64_YMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_madd52hi_epu64">
+ <CPUID>AVX512IFMA52</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="__m256i" varname="b" etype="UI64"/>
+ <parameter type="__m256i" varname="c" etype="UI64"/>
+ <description>Multiply packed unsigned 52-bit integers in each 64-bit element of "b" and "c" to form a 104-bit intermediate result. Add the high 52-bit unsigned integer from the intermediate result with the corresponding unsigned 64-bit integer in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ tmp[127:0] := ZeroExtend64(b[i+51:i]) * ZeroExtend64(c[i+51:i])
+ dst[i+63:i] := a[i+63:i] + ZeroExtend64(tmp[103:52])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMADD52HUQ" form="ymm {z}, ymm, ymm" xed="VPMADD52HUQ_YMMu64_MASKmskw_YMMu64_YMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_madd52hi_epu64">
+ <CPUID>AVX512IFMA52</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <parameter type="__m128i" varname="b" etype="UI64"/>
+ <parameter type="__m128i" varname="c" etype="UI64"/>
+ <description>Multiply packed unsigned 52-bit integers in each 64-bit element of "b" and "c" to form a 104-bit intermediate result. Add the high 52-bit unsigned integer from the intermediate result with the corresponding unsigned 64-bit integer in "a", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ tmp[127:0] := ZeroExtend64(b[i+51:i]) * ZeroExtend64(c[i+51:i])
+ dst[i+63:i] := a[i+63:i] + ZeroExtend64(tmp[103:52])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMADD52HUQ" form="xmm, xmm, xmm" xed="VPMADD52HUQ_XMMu64_MASKmskw_XMMu64_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_madd52hi_epu64">
+ <CPUID>AVX512IFMA52</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="b" etype="UI64"/>
+ <parameter type="__m128i" varname="c" etype="UI64"/>
+ <description>Multiply packed unsigned 52-bit integers in each 64-bit element of "b" and "c" to form a 104-bit intermediate result. Add the high 52-bit unsigned integer from the intermediate result with the corresponding unsigned 64-bit integer in "a", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ tmp[127:0] := ZeroExtend64(b[i+51:i]) * ZeroExtend64(c[i+51:i])
+ dst[i+63:i] := a[i+63:i] + ZeroExtend64(tmp[103:52])
+ ELSE
+ dst[i+63:i] := a[i+63:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMADD52HUQ" form="xmm {k}, xmm, xmm" xed="VPMADD52HUQ_XMMu64_MASKmskw_XMMu64_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_madd52hi_epu64">
+ <CPUID>AVX512IFMA52</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <parameter type="__m128i" varname="b" etype="UI64"/>
+ <parameter type="__m128i" varname="c" etype="UI64"/>
+ <description>Multiply packed unsigned 52-bit integers in each 64-bit element of "b" and "c" to form a 104-bit intermediate result. Add the high 52-bit unsigned integer from the intermediate result with the corresponding unsigned 64-bit integer in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ tmp[127:0] := ZeroExtend64(b[i+51:i]) * ZeroExtend64(c[i+51:i])
+ dst[i+63:i] := a[i+63:i] + ZeroExtend64(tmp[103:52])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMADD52HUQ" form="xmm {z}, xmm, xmm" xed="VPMADD52HUQ_XMMu64_MASKmskw_XMMu64_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_prefetch_i64gather_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512PF</CPUID>
+ <category>Load</category>
+ <return type="void"/>
+ <parameter type="__m512i" varname="vindex" etype="SI64"/>
+ <parameter type="void const*" varname="base_addr" etype="FP32"/>
+ <parameter type="int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <parameter type="int" varname="hint" etype="IMM" hint="TRUE" immtype="_MM_HINT_PREFETCH"/>
+ <description>Prefetch single-precision (32-bit) floating-point elements from memory using 64-bit indices. 32-bit elements are loaded from addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged in cache. "scale" should be 1, 2, 4 or 8. The "hint" parameter may be 1 (_MM_HINT_T0) for prefetching to L1 cache, or 2 (_MM_HINT_T1) for prefetching to L2 cache.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ m := j*64
+ addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8
+ Prefetch(MEM[addr+31:addr], hint)
+ENDFOR
+ </operation>
+ <instruction name="VGATHERPF0QPS" form="vm64z" xed="VGATHERPF0QPS_MEMf32_MASKmskw_AVX512PF_VL512"/>
+ <instruction name="VGATHERPF1QPS" form="vm64z" xed="VGATHERPF1QPS_MEMf32_MASKmskw_AVX512PF_VL512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_prefetch_i64gather_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512PF</CPUID>
+ <category>Load</category>
+ <return type="void"/>
+ <parameter type="__m512i" varname="vindex" etype="SI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="void const*" varname="base_addr" etype="FP32"/>
+ <parameter type="int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <parameter type="int" varname="hint" etype="IMM" hint="TRUE" immtype="_MM_HINT_PREFETCH"/>
+ <description>Prefetch single-precision (32-bit) floating-point elements from memory using 64-bit indices. 32-bit elements are loaded from addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged in cache using writemask "k" (elements are only brought into cache when their corresponding mask bit is set). "scale" should be 1, 2, 4 or 8. The "hint" parameter may be 1 (_MM_HINT_T0) for prefetching to L1 cache, or 2 (_MM_HINT_T1) for prefetching to L2 cache.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ m := j*64
+ IF k[j]
+ addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8
+ Prefetch(MEM[addr+31:addr], hint)
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VGATHERPF0QPS" form="vm64z {k}" xed="VGATHERPF0QPS_MEMf32_MASKmskw_AVX512PF_VL512"/>
+ <instruction name="VGATHERPF1QPS" form="vm64z {k}" xed="VGATHERPF1QPS_MEMf32_MASKmskw_AVX512PF_VL512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_prefetch_i64scatter_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512PF</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="base_addr" etype="FP32"/>
+ <parameter type="__m512i" varname="vindex" etype="SI64"/>
+ <parameter type="int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <parameter type="int" varname="hint" etype="IMM" hint="TRUE" immtype="_MM_HINT_PREFETCH"/>
+ <description>Prefetch single-precision (32-bit) floating-point elements with intent to write into memory using 64-bit indices. Elements are prefetched into cache level "hint", where "hint" may be 1 (_MM_HINT_T0) for prefetching to L1 cache, or 2 (_MM_HINT_T1) for prefetching to L2 cache. 32-bit elements are stored at addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). "scale" should be 1, 2, 4 or 8.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ m := j*64
+ addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8
+ Prefetch(MEM[addr+31:addr], hint)
+ENDFOR
+ </operation>
+ <instruction name="VSCATTERPF0QPS" form="vm64z" xed="VSCATTERPF0QPS_MEMf32_MASKmskw_AVX512PF_VL512"/>
+ <instruction name="VSCATTERPF1QPS" form="vm64z" xed="VSCATTERPF1QPS_MEMf32_MASKmskw_AVX512PF_VL512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_prefetch_i64scatter_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512PF</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="base_addr" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="vindex" etype="SI64"/>
+ <parameter type="int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <parameter type="int" varname="hint" etype="IMM" hint="TRUE" immtype="_MM_HINT_PREFETCH"/>
+ <description>Prefetch single-precision (32-bit) floating-point elements with intent to write into memory using 64-bit indices. The "hint" parameter may be 1 (_MM_HINT_T0) for prefetching to L1 cache, or 2 (_MM_HINT_T1) for prefetching to L2 cache. 32-bit elements are stored at addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale") subject to mask "k" (elements are not brought into cache when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ m := j*64
+ IF k[j]
+ addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8
+ Prefetch(MEM[addr+31:addr], hint)
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VSCATTERPF0QPS" form="vm64z {k}" xed="VSCATTERPF0QPS_MEMf32_MASKmskw_AVX512PF_VL512"/>
+ <instruction name="VSCATTERPF1QPS" form="vm64z {k}" xed="VSCATTERPF1QPS_MEMf32_MASKmskw_AVX512PF_VL512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_prefetch_i32gather_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512PF</CPUID>
+ <category>Load</category>
+ <return type="void"/>
+ <parameter type="__m256i" varname="vindex" etype="SI32"/>
+ <parameter type="void const*" varname="base_addr" etype="FP64"/>
+ <parameter type="int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <parameter type="int" varname="hint" etype="IMM" hint="TRUE" immtype="_MM_HINT_PREFETCH"/>
+ <description>Prefetch double-precision (64-bit) floating-point elements from memory using 32-bit indices. 64-bit elements are loaded from addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged in cache. "scale" should be 1, 2, 4 or 8. The "hint" parameter may be 1 (_MM_HINT_T0) for prefetching to L1 cache, or 2 (_MM_HINT_T1) for prefetching to L2 cache.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ m := j*32
+ addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
+ Prefetch(MEM[addr+63:addr], hint)
+ENDFOR
+ </operation>
+ <instruction name="VGATHERPF0DPD" form="vm32y" xed="VGATHERPF0DPD_MEMf64_MASKmskw_AVX512PF_VL512"/>
+ <instruction name="VGATHERPF1DPD" form="vm32y" xed="VGATHERPF1DPD_MEMf64_MASKmskw_AVX512PF_VL512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_prefetch_i32gather_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512PF</CPUID>
+ <category>Load</category>
+ <return type="void"/>
+ <parameter type="__m256i" varname="vindex" etype="SI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="void const*" varname="base_addr" etype="FP64"/>
+ <parameter type="int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <parameter type="int" varname="hint" etype="IMM" hint="TRUE" immtype="_MM_HINT_PREFETCH"/>
+ <description>Prefetch double-precision (64-bit) floating-point elements from memory using 32-bit indices. 64-bit elements are loaded from addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged in cache using writemask "k" (elements are brought into cache only when their corresponding mask bits are set). "scale" should be 1, 2, 4 or 8. The "hint" parameter may be 1 (_MM_HINT_T0) for prefetching to L1 cache, or 2 (_MM_HINT_T1) for prefetching to L2 cache.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ m := j*32
+ IF k[j]
+ addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
+ Prefetch(MEM[addr+63:addr], hint)
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VGATHERPF0DPD" form="vm32y {k}" xed="VGATHERPF0DPD_MEMf64_MASKmskw_AVX512PF_VL512"/>
+ <instruction name="VGATHERPF1DPD" form="vm32y {k}" xed="VGATHERPF1DPD_MEMf64_MASKmskw_AVX512PF_VL512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_prefetch_i32scatter_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512PF</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="base_addr" etype="FP64"/>
+ <parameter type="__m256i" varname="vindex" etype="SI32"/>
+ <parameter type="int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <parameter type="int" varname="hint" etype="IMM" hint="TRUE" immtype="_MM_HINT_PREFETCH"/>
+ <description>Prefetch double-precision (64-bit) floating-point elements with intent to write using 32-bit indices. The "hint" parameter may be 1 (_MM_HINT_T0) for prefetching to L1 cache, or 2 (_MM_HINT_T1) for prefetching to L2 cache. 64-bit elements are brought into cache from addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). "scale" should be 1, 2, 4 or 8.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ m := j*32
+ addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
+ Prefetch(MEM[addr+63:addr], hint)
+ENDFOR
+ </operation>
+ <instruction name="VSCATTERPF0DPD" form="vm32y" xed="VSCATTERPF0DPD_MEMf64_MASKmskw_AVX512PF_VL512"/>
+ <instruction name="VSCATTERPF1DPD" form="vm32y" xed="VSCATTERPF1DPD_MEMf64_MASKmskw_AVX512PF_VL512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_prefetch_i32scatter_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512PF</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="base_addr" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="vindex" etype="SI32"/>
+ <parameter type="int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <parameter type="int" varname="hint" etype="IMM" hint="TRUE" immtype="_MM_HINT_PREFETCH"/>
+ <description>Prefetch double-precision (64-bit) floating-point elements with intent to write using 32-bit indices. The "hint" parameter may be 1 (_MM_HINT_T0) for prefetching to L1 cache, or 2 (_MM_HINT_T1) for prefetching to L2 cache. 64-bit elements are brought into cache from addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale") subject to mask "k" (elements are not brought into cache when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ m := j*32
+ IF k[j]
+ addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
+ Prefetch(MEM[addr+63:addr], hint)
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VSCATTERPF0DPD" form="vm32y {k}" xed="VSCATTERPF0DPD_MEMf64_MASKmskw_AVX512PF_VL512"/>
+ <instruction name="VSCATTERPF1DPD" form="vm32y {k}" xed="VSCATTERPF1DPD_MEMf64_MASKmskw_AVX512PF_VL512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_prefetch_i64gather_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512PF</CPUID>
+ <category>Load</category>
+ <return type="void"/>
+ <parameter type="__m512i" varname="vindex" etype="SI64"/>
+ <parameter type="void const*" varname="base_addr" etype="FP64"/>
+ <parameter type="int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <parameter type="int" varname="hint" etype="IMM" hint="TRUE" immtype="_MM_HINT_PREFETCH"/>
+ <description>Prefetch double-precision (64-bit) floating-point elements from memory into cache level specified by "hint" using 64-bit indices. 64-bit elements are loaded from addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). "scale" should be 1, 2, 4 or 8. The "hint" parameter may be 1 (_MM_HINT_T0) for prefetching to L1 cache, or 2 (_MM_HINT_T1) for prefetching to L2 cache.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ m := j*64
+ addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8
+ Prefetch(MEM[addr+63:addr], hint)
+ENDFOR
+ </operation>
+ <instruction name="VGATHERPF0QPD" form="vm32z" xed="VGATHERPF0QPD_MEMf64_MASKmskw_AVX512PF_VL512"/>
+ <instruction name="VGATHERPF1QPD" form="vm32z" xed="VGATHERPF1QPD_MEMf64_MASKmskw_AVX512PF_VL512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_prefetch_i64gather_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512PF</CPUID>
+ <category>Load</category>
+ <return type="void"/>
+ <parameter type="__m512i" varname="vindex" etype="SI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="void const*" varname="base_addr" etype="FP64"/>
+ <parameter type="int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <parameter type="int" varname="hint" etype="IMM" hint="TRUE" immtype="_MM_HINT_PREFETCH"/>
+ <description>Prefetch double-precision (64-bit) floating-point elements from memory into cache level specified by "hint" using 64-bit indices. 64-bit elements are loaded from addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). Prefetched elements are merged in cache using writemask "k" (elements are copied from memory when the corresponding mask bit is set). "scale" should be 1, 2, 4 or 8. The "hint" parameter may be 1 (_MM_HINT_T0) for prefetching to L1 cache, or 2 (_MM_HINT_T1) for prefetching to L2 cache.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ m := j*64
+ IF k[j]
+ addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8
+ Prefetch(MEM[addr+63:addr], hint)
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VGATHERPF0QPD" form="vm32z {k}" xed="VGATHERPF0QPD_MEMf64_MASKmskw_AVX512PF_VL512"/>
+ <instruction name="VGATHERPF1QPD" form="vm32z {k}" xed="VGATHERPF1QPD_MEMf64_MASKmskw_AVX512PF_VL512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_prefetch_i64scatter_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512PF</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="base_addr" etype="FP64"/>
+ <parameter type="__m512i" varname="vindex" etype="SI64"/>
+ <parameter type="int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <parameter type="int" varname="hint" etype="IMM" hint="TRUE" immtype="_MM_HINT_PREFETCH"/>
+ <description>Prefetch double-precision (64-bit) floating-point elements with intent to write into memory using 64-bit indices. The "hint" parameter may be 1 (_MM_HINT_T0) for prefetching to L1 cache, or 2 (_MM_HINT_T1) for prefetching to L2 cache. 64-bit elements are brought into cache from addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale"). "scale" should be 1, 2, 4 or 8.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ m := j*64
+ addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8
+ Prefetch(MEM[addr+63:addr], hint)
+ENDFOR
+ </operation>
+ <instruction name="VSCATTERPF0QPD" form="vm32z" xed="VSCATTERPF0QPD_MEMf64_MASKmskw_AVX512PF_VL512"/>
+ <instruction name="VSCATTERPF1QPD" form="vm32z" xed="VSCATTERPF1QPD_MEMf64_MASKmskw_AVX512PF_VL512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_prefetch_i64scatter_pd">
+ <type>Floating Point</type>
+ <CPUID>AVX512PF</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="base_addr" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="vindex" etype="SI64"/>
+ <parameter type="int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <parameter type="int" varname="hint" etype="IMM" hint="TRUE" immtype="_MM_HINT_PREFETCH"/>
+ <description>Prefetch double-precision (64-bit) floating-point elements with intent to write into memory using 64-bit indices. The "hint" parameter may be 1 (_MM_HINT_T0) for prefetching to L1 cache, or 2 (_MM_HINT_T1) for prefetching to L2 cache. 64-bit elements are brought into cache from addresses starting at "base_addr" and offset by each 64-bit element in "vindex" (each index is scaled by the factor in "scale") subject to mask "k" (elements are not brought into cache when the corresponding mask bit is not set). "scale" should be 1, 2, 4 or 8.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ m := j*64
+ IF k[j]
+ addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8
+ Prefetch(MEM[addr+63:addr], hint)
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VSCATTERPF0QPD" form="vm32z {k}" xed="VSCATTERPF0QPD_MEMf64_MASKmskw_AVX512PF_VL512"/>
+ <instruction name="VSCATTERPF1QPD" form="vm32z {k}" xed="VSCATTERPF1QPD_MEMf64_MASKmskw_AVX512PF_VL512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_prefetch_i32gather_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512PF/KNCNI</CPUID>
+ <category>Load</category>
+ <return type="void"/>
+ <parameter type="__m512i" varname="vindex" etype="SI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="void const*" varname="base_addr" etype="FP32"/>
+ <parameter type="int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <parameter type="int" varname="hint" etype="IMM" hint="TRUE" immtype="_MM_HINT_PREFETCH"/>
+ <description>Prefetch single-precision (32-bit) floating-point elements from memory using 32-bit indices. 32-bit elements are loaded from addresses starting at "base_addr" and offset by each 32-bit element in "vindex" (each index is scaled by the factor in "scale"). Gathered elements are merged in cache using writemask "k" (elements are brought into cache only when their corresponding mask bits are set). "scale" should be 1, 2, 4 or 8. The "hint" parameter may be 1 (_MM_HINT_T0) for prefetching to L1 cache, or 2 (_MM_HINT_T1) for prefetching to L2 cache.</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ m := j*32
+ IF k[j]
+ addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
+ Prefetch(MEM[addr+31:addr], hint)
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VGATHERPF0DPS" form="vm32y {k}" xed="VGATHERPF0DPS_MEMf32_MASKmskw_AVX512PF_VL512"/>
+ <instruction name="VGATHERPF1DPS" form="vm32y {k}" xed="VGATHERPF1DPS_MEMf32_MASKmskw_AVX512PF_VL512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_prefetch_i32extgather_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512PF/KNCNI</CPUID>
+ <category>Load</category>
+ <return type="void"/>
+ <parameter type="__m512i" varname="vindex" etype="SI32"/>
+ <parameter type="void const *" varname="base_addr" etype="FP32"/>
+ <parameter type="_MM_UPCONV_PS_ENUM" varname="conv" etype="IMM" immtype="_MM_UPCONV_PS"/>
+ <parameter type="int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <parameter type="int" varname="hint" etype="IMM" hint="TRUE" immtype="_MM_HINT_PREFETCH"/>
+ <description>Prefetches a set of 16 single-precision (32-bit) memory locations pointed by base address "base_addr" and 32-bit integer index vector "vindex" with scale "scale" to L1 or L2 level of cache depending on the value of "hint". The "hint" parameter may be 1 (_MM_HINT_T0) for prefetching to L1 cache, or 2 (_MM_HINT_T1) for prefetching to L2 cache.
+The "conv" parameter specifies the granularity used by compilers to better encode the instruction. It should be the same as the "conv" parameter specified for the subsequent gather intrinsic.</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ m := j*32
+ addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
+ Prefetch(MEM[addr+31:addr], hint)
+ENDFOR
+ </operation>
+ <instruction name="VGATHERPF0DPS" form="m512" xed="VGATHERPF0DPS_MEMf32_MASKmskw_AVX512PF_VL512"/>
+ <instruction name="VGATHERPF1DPS" form="m512" xed="VGATHERPF1DPS_MEMf32_MASKmskw_AVX512PF_VL512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_prefetch_i32extgather_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512PF/KNCNI</CPUID>
+ <category>Load</category>
+ <return type="void"/>
+ <parameter type="__m512i" varname="vindex" etype="SI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="void const *" varname="base_addr" etype="FP32"/>
+ <parameter type="_MM_UPCONV_PS_ENUM" varname="conv" etype="IMM" immtype="_MM_UPCONV_PS"/>
+ <parameter type="int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <parameter type="int" varname="hint" etype="IMM" hint="TRUE" immtype="_MM_HINT_PREFETCH"/>
+ <description>Prefetches a set of 16 single-precision (32-bit) memory locations pointed by base address "base_addr" and 32-bit integer index vector "vindex" with scale "scale" to L1 or L2 level of cache depending on the value of "hint". Gathered elements are merged in cache using writemask "k" (elements are brought into cache only when their corresponding mask bits are set). The "hint" parameter may be 1 (_MM_HINT_T0) for prefetching to L1 cache, or 2 (_MM_HINT_T1) for prefetching to L2 cache.
+The "conv" parameter specifies the granularity used by compilers to better encode the instruction. It should be the same as the "conv" parameter specified for the subsequent gather intrinsic.</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ m := j*32
+ IF k[j]
+ addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
+ Prefetch(MEM[addr+31:addr], hint)
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VGATHERPF0DPS" form="m512 {k}" xed="VGATHERPF0DPS_MEMf32_MASKmskw_AVX512PF_VL512"/>
+ <instruction name="VGATHERPF1DPS" form="m512 {k}" xed="VGATHERPF1DPS_MEMf32_MASKmskw_AVX512PF_VL512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_prefetch_i32extscatter_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512PF/KNCNI</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void *" varname="base_addr" etype="FP32"/>
+ <parameter type="__m512i" varname="vindex" etype="SI32"/>
+ <parameter type="_MM_UPCONV_PS_ENUM" varname="conv" etype="IMM" immtype="_MM_UPCONV_PS"/>
+ <parameter type="int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <parameter type="int" varname="hint" etype="IMM" hint="TRUE" immtype="_MM_HINT_PREFETCH"/>
+ <description>Prefetches a set of 16 single-precision (32-bit) memory locations pointed by base address "base_addr" and 32-bit integer index vector "vindex" with scale "scale" to L1 or L2 level of cache depending on the value of "hint", with a request for exclusive ownership. The "hint" parameter may be one of the following: _MM_HINT_T0 = 1 for prefetching to L1 cache, _MM_HINT_T1 = 2 for prefetching to L2 cache, _MM_HINT_T2 = 3 for prefetching to L2 cache non-temporal, _MM_HINT_NTA = 0 for prefetching to L1 cache non-temporal. The "conv" parameter specifies the granularity used by compilers to better encode the instruction. It should be the same as the "conv" parameter specified for the subsequent scatter intrinsic.</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ m := j*32
+ addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
+ Prefetch(MEM[addr+31:addr], hint)
+ENDFOR
+ </operation>
+ <instruction name="VSCATTERPF0DPS" form="m512" xed="VSCATTERPF0DPS_MEMf32_MASKmskw_AVX512PF_VL512"/>
+ <instruction name="VSCATTERPF1DPS" form="m512" xed="VSCATTERPF1DPS_MEMf32_MASKmskw_AVX512PF_VL512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_prefetch_i32extscatter_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512PF/KNCNI</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void *" varname="base_addr" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="vindex" etype="SI32"/>
+ <parameter type="_MM_UPCONV_PS_ENUM" varname="conv" etype="IMM" immtype="_MM_UPCONV_PS"/>
+ <parameter type="int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <parameter type="int" varname="hint" etype="IMM" hint="TRUE" immtype="_MM_HINT_PREFETCH"/>
+ <description>Prefetches a set of 16 single-precision (32-bit) memory locations pointed by base address "base_addr" and 32-bit integer index vector "vindex" with scale "scale" to L1 or L2 level of cache depending on the value of "hint". The "hint" parameter may be 1 (_MM_HINT_T0) for prefetching to L1 cache, or 2 (_MM_HINT_T1) for prefetching to L2 cache.
+The "conv" parameter specifies the granularity used by compilers to better encode the instruction. It should be the same as the "conv" parameter specified for the subsequent gather intrinsic. Only those elements whose corresponding mask bit in "k" is set are loaded into cache.</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ m := j*32
+ IF k[j]
+ addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
+ Prefetch(MEM[addr+31:addr], hint)
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VSCATTERPF0DPS" form="m512 {k}" xed="VSCATTERPF0DPS_MEMf32_MASKmskw_AVX512PF_VL512"/>
+ <instruction name="VSCATTERPF1DPS" form="m512 {k}" xed="VSCATTERPF1DPS_MEMf32_MASKmskw_AVX512PF_VL512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_prefetch_i32gather_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512PF/KNCNI</CPUID>
+ <category>Load</category>
+ <return type="void"/>
+ <parameter type="__m512i" varname="vindex" etype="SI32"/>
+ <parameter type="void const*" varname="base_addr" etype="FP32"/>
+ <parameter type="int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <parameter type="int" varname="hint" etype="IMM" hint="TRUE" immtype="_MM_HINT_PREFETCH"/>
+ <description>Prefetches 16 single-precision (32-bit) floating-point elements in memory starting at location "base_addr" at packed 32-bit integer indices stored in "vindex" scaled by "scale". The "hint" parameter may be 1 (_MM_HINT_T0) for prefetching to L1 cache, or 2 (_MM_HINT_T1) for prefetching to L2 cache.</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ m := j*32
+ addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
+ Prefetch(MEM[addr+31:addr], hint)
+ENDFOR
+ </operation>
+ <instruction name="VGATHERPF0DPS" form="m512" xed="VGATHERPF0DPS_MEMf32_MASKmskw_AVX512PF_VL512"/>
+ <instruction name="VGATHERPF1DPS" form="m512" xed="VGATHERPF1DPS_MEMf32_MASKmskw_AVX512PF_VL512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_prefetch_i32scatter_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512PF/KNCNI</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="base_addr" etype="FP32"/>
+ <parameter type="__m512i" varname="vindex" etype="SI32"/>
+ <parameter type="int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <parameter type="int" varname="hint" etype="IMM" hint="TRUE" immtype="_MM_HINT_PREFETCH"/>
+ <description>Prefetches 16 single-precision (32-bit) floating-point elements in memory starting at location "base_addr" at packed 32-bit integer indices stored in "vindex" scaled by "scale". The "hint" parameter may be 1 (_MM_HINT_T0) for prefetching to L1 cache, or 2 (_MM_HINT_T1) for prefetching to L2 cache.</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ m := j*32
+ addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
+ Prefetch(MEM[addr+31:addr], hint)
+ENDFOR
+ </operation>
+ <instruction name="VSCATTERPF0DPS" form="m512" xed="VSCATTERPF0DPS_MEMf32_MASKmskw_AVX512PF_VL512"/>
+ <instruction name="VSCATTERPF1DPS" form="m512" xed="VSCATTERPF1DPS_MEMf32_MASKmskw_AVX512PF_VL512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512/KNC" name="_mm512_mask_prefetch_i32scatter_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512PF/KNCNI</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="base_addr" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="vindex" etype="SI32"/>
+ <parameter type="int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <parameter type="int" varname="hint" etype="IMM" hint="TRUE" immtype="_MM_HINT_PREFETCH"/>
+ <description>Prefetches 16 single-precision (32-bit) floating-point elements in memory starting at location "base_addr" at packed 32-bit integer indices stored in "vindex" scaled by "scale". The "hint" parameter may be 1 (_MM_HINT_T0) for prefetching to L1 cache, or 2 (_MM_HINT_T1) for prefetching to L2 cache. Only those elements whose corresponding mask bit in "k" is set are loaded into the desired cache.</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ m := j*32
+ IF k[j]
+ addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
+ Prefetch(MEM[addr+31:addr], hint)
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VSCATTERPF0DPS" form="m512 {k}" xed="VSCATTERPF0DPS_MEMf32_MASKmskw_AVX512PF_VL512"/>
+ <instruction name="VSCATTERPF1DPS" form="m512 {k}" xed="VSCATTERPF1DPS_MEMf32_MASKmskw_AVX512PF_VL512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_popcnt_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VPOPCNTDQ</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Bit Manipulation</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <description>Count the number of logical 1 bits in packed 64-bit integers in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE POPCNT(a) {
+ count := 0
+ DO WHILE a &gt; 0
+ count += a[0]
+ a &gt;&gt;= 1
+ OD
+ RETURN count
+}
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := POPCNT(a[i+63:i])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPOPCNTQ" form="ymm {z}, ymm" xed="VPOPCNTQ_YMMu64_MASKmskw_YMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_popcnt_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VPOPCNTDQ</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Bit Manipulation</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m256i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <description>Count the number of logical 1 bits in packed 64-bit integers in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE POPCNT(a) {
+ count := 0
+ DO WHILE a &gt; 0
+ count += a[0]
+ a &gt;&gt;= 1
+ OD
+ RETURN count
+}
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := POPCNT(a[i+63:i])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPOPCNTQ" form="ymm {k}, ymm" xed="VPOPCNTQ_YMMu64_MASKmskw_YMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_popcnt_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VPOPCNTDQ</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Bit Manipulation</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <description>Count the number of logical 1 bits in packed 64-bit integers in "a", and store the results in "dst".</description>
+ <operation>
+DEFINE POPCNT(a) {
+ count := 0
+ DO WHILE a &gt; 0
+ count += a[0]
+ a &gt;&gt;= 1
+ OD
+ RETURN count
+}
+FOR j := 0 to 3
+ i := j*64
+ dst[i+63:i] := POPCNT(a[i+63:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPOPCNTQ" form="ymm, ymm" xed="VPOPCNTQ_YMMu64_MASKmskw_YMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_popcnt_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VPOPCNTDQ</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Bit Manipulation</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <description>Count the number of logical 1 bits in packed 64-bit integers in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE POPCNT(a) {
+ count := 0
+ DO WHILE a &gt; 0
+ count += a[0]
+ a &gt;&gt;= 1
+ OD
+ RETURN count
+}
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := POPCNT(a[i+63:i])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPOPCNTQ" form="xmm {z}, xmm" xed="VPOPCNTQ_XMMu64_MASKmskw_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_popcnt_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VPOPCNTDQ</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Bit Manipulation</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m128i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <description>Count the number of logical 1 bits in packed 64-bit integers in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE POPCNT(a) {
+ count := 0
+ DO WHILE a &gt; 0
+ count += a[0]
+ a &gt;&gt;= 1
+ OD
+ RETURN count
+}
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := POPCNT(a[i+63:i])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPOPCNTQ" form="xmm {k}, xmm" xed="VPOPCNTQ_XMMu64_MASKmskw_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_popcnt_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VPOPCNTDQ</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Bit Manipulation</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <description>Count the number of logical 1 bits in packed 64-bit integers in "a", and store the results in "dst".</description>
+ <operation>
+DEFINE POPCNT(a) {
+ count := 0
+ DO WHILE a &gt; 0
+ count += a[0]
+ a &gt;&gt;= 1
+ OD
+ RETURN count
+}
+FOR j := 0 to 1
+ i := j*64
+ dst[i+63:i] := POPCNT(a[i+63:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPOPCNTQ" form="xmm, xmm" xed="VPOPCNTQ_XMMu64_MASKmskw_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" vexEq="TRUE" name="_mm256_popcnt_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VPOPCNTDQ</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Bit Manipulation</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <description>Count the number of logical 1 bits in packed 32-bit integers in "a", and store the results in "dst".</description>
+ <operation>
+DEFINE POPCNT(a) {
+ count := 0
+ DO WHILE a &gt; 0
+ count += a[0]
+ a &gt;&gt;= 1
+ OD
+ RETURN count
+}
+FOR j := 0 to 7
+ i := j*32
+ dst[i+31:i] := POPCNT(a[i+31:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPOPCNTD" form="ymm, ymm" xed="VPOPCNTD_YMMu32_MASKmskw_YMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" vexEq="TRUE" name="_mm256_mask_popcnt_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VPOPCNTDQ</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Bit Manipulation</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <description>Count the number of logical 1 bits in packed 32-bit integers in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE POPCNT(a) {
+ count := 0
+ DO WHILE a &gt; 0
+ count += a[0]
+ a &gt;&gt;= 1
+ OD
+ RETURN count
+}
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := POPCNT(a[i+31:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPOPCNTD" form="ymm {k}, ymm" xed="VPOPCNTD_YMMu32_MASKmskw_YMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" vexEq="TRUE" name="_mm256_maskz_popcnt_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VPOPCNTDQ</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Bit Manipulation</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <description>Count the number of logical 1 bits in packed 32-bit integers in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE POPCNT(a) {
+ count := 0
+ DO WHILE a &gt; 0
+ count += a[0]
+ a &gt;&gt;= 1
+ OD
+ RETURN count
+}
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := POPCNT(a[i+31:i])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPOPCNTD" form="ymm {z}, ymm" xed="VPOPCNTD_YMMu32_MASKmskw_YMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" vexEq="TRUE" name="_mm_popcnt_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VPOPCNTDQ</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Bit Manipulation</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <description>Count the number of logical 1 bits in packed 32-bit integers in "a", and store the results in "dst".</description>
+ <operation>
+DEFINE POPCNT(a) {
+ count := 0
+ DO WHILE a &gt; 0
+ count += a[0]
+ a &gt;&gt;= 1
+ OD
+ RETURN count
+}
+FOR j := 0 to 3
+ i := j*32
+ dst[i+31:i] := POPCNT(a[i+31:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPOPCNTD" form="xmm, xmm" xed="VPOPCNTD_XMMu32_MASKmskw_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" vexEq="TRUE" name="_mm_mask_popcnt_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VPOPCNTDQ</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Bit Manipulation</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <description>Count the number of logical 1 bits in packed 32-bit integers in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE POPCNT(a) {
+ count := 0
+ DO WHILE a &gt; 0
+ count += a[0]
+ a &gt;&gt;= 1
+ OD
+ RETURN count
+}
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := POPCNT(a[i+31:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPOPCNTD" form="xmm {k}, xmm" xed="VPOPCNTD_XMMu32_MASKmskw_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" vexEq="TRUE" name="_mm_maskz_popcnt_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VPOPCNTDQ</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Bit Manipulation</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <description>Count the number of logical 1 bits in packed 32-bit integers in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE POPCNT(a) {
+ count := 0
+ DO WHILE a &gt; 0
+ count += a[0]
+ a &gt;&gt;= 1
+ OD
+ RETURN count
+}
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := POPCNT(a[i+31:i])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPOPCNTD" form="xmm {z}, xmm" xed="VPOPCNTD_XMMu32_MASKmskw_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" vexEq="TRUE" name="_mm512_popcnt_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VPOPCNTDQ</CPUID>
+ <category>Bit Manipulation</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <description>Count the number of logical 1 bits in packed 32-bit integers in "a", and store the results in "dst".</description>
+ <operation>
+DEFINE POPCNT(a) {
+ count := 0
+ DO WHILE a &gt; 0
+ count += a[0]
+ a &gt;&gt;= 1
+ OD
+ RETURN count
+}
+FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := POPCNT(a[i+31:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPOPCNTD" form="zmm, zmm" xed="VPOPCNTD_ZMMu32_MASKmskw_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" vexEq="TRUE" name="_mm512_mask_popcnt_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VPOPCNTDQ</CPUID>
+ <category>Bit Manipulation</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="src" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <description>Count the number of logical 1 bits in packed 32-bit integers in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE POPCNT(a) {
+ count := 0
+ DO WHILE a &gt; 0
+ count += a[0]
+ a &gt;&gt;= 1
+ OD
+ RETURN count
+}
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := POPCNT(a[i+31:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPOPCNTD" form="zmm {k}, zmm" xed="VPOPCNTD_ZMMu32_MASKmskw_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" vexEq="TRUE" name="_mm512_maskz_popcnt_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512VPOPCNTDQ</CPUID>
+ <category>Bit Manipulation</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <description>Count the number of logical 1 bits in packed 32-bit integers in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE POPCNT(a) {
+ count := 0
+ DO WHILE a &gt; 0
+ count += a[0]
+ a &gt;&gt;= 1
+ OD
+ RETURN count
+}
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := POPCNT(a[i+31:i])
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPOPCNTD" form="zmm {z}, zmm" xed="VPOPCNTD_ZMMu32_MASKmskw_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" vexEq="TRUE" name="_mm512_popcnt_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VPOPCNTDQ</CPUID>
+ <category>Bit Manipulation</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <description>Count the number of logical 1 bits in packed 64-bit integers in "a", and store the results in "dst".</description>
+ <operation>
+DEFINE POPCNT(a) {
+ count := 0
+ DO WHILE a &gt; 0
+ count += a[0]
+ a &gt;&gt;= 1
+ OD
+ RETURN count
+}
+FOR j := 0 to 7
+ i := j*64
+ dst[i+63:i] := POPCNT(a[i+63:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPOPCNTQ" form="zmm, zmm" xed="VPOPCNTQ_ZMMu64_MASKmskw_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" vexEq="TRUE" name="_mm512_mask_popcnt_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VPOPCNTDQ</CPUID>
+ <category>Bit Manipulation</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <description>Count the number of logical 1 bits in packed 64-bit integers in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE POPCNT(a) {
+ count := 0
+ DO WHILE a &gt; 0
+ count += a[0]
+ a &gt;&gt;= 1
+ OD
+ RETURN count
+}
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := POPCNT(a[i+63:i])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPOPCNTQ" form="zmm {k}, zmm" xed="VPOPCNTQ_ZMMu64_MASKmskw_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" vexEq="TRUE" name="_mm512_maskz_popcnt_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512VPOPCNTDQ</CPUID>
+ <category>Bit Manipulation</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <description>Count the number of logical 1 bits in packed 64-bit integers in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE POPCNT(a) {
+ count := 0
+ DO WHILE a &gt; 0
+ count += a[0]
+ a &gt;&gt;= 1
+ OD
+ RETURN count
+}
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := POPCNT(a[i+63:i])
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPOPCNTQ" form="zmm {z}, zmm" xed="VPOPCNTQ_ZMMu64_MASKmskw_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" vexEq="TRUE" name="_mm512_4fmadd_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512_4FMAPS</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__m512" varname="a0" etype="FP32"/>
+ <parameter type="__m512" varname="a1" etype="FP32"/>
+ <parameter type="__m512" varname="a2" etype="FP32"/>
+ <parameter type="__m512" varname="a3" etype="FP32"/>
+ <parameter type="__m128 *" varname="b" etype="FP32" memwidth="128"/>
+ <description>Multiply packed single-precision (32-bit) floating-point elements specified in 4 consecutive operands "a0" through "a3" by the 4 corresponding packed elements in "b", accumulate with the corresponding elements in "src", and store the results in "dst".</description>
+ <operation>
+dst[511:0] := src[511:0]
+FOR i := 0 to 15
+ FOR m := 0 to 3
+ addr := b + m * 32
+ dst.fp32[i] := dst.fp32[i] + a{m}.fp32[i] * Cast_FP32(MEM[addr+31:addr])
+ ENDFOR
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="V4FMADDPS" form="zmm, zmm, m128" xed="V4FMADDPS_ZMMf32_MASKmskw_ZMMf32_MEMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" vexEq="TRUE" name="_mm512_mask_4fmadd_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512_4FMAPS</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a0" etype="FP32"/>
+ <parameter type="__m512" varname="a1" etype="FP32"/>
+ <parameter type="__m512" varname="a2" etype="FP32"/>
+ <parameter type="__m512" varname="a3" etype="FP32"/>
+ <parameter type="__m128 *" varname="b" etype="FP32" memwidth="128"/>
+ <description>Multiply packed single-precision (32-bit) floating-point elements specified in 4 consecutive operands "a0" through "a3" by the 4 corresponding packed elements in "b", accumulate with the corresponding elements in "src", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+dst[511:0] := src[511:0]
+FOR i := 0 to 15
+ FOR m := 0 to 3
+ addr := b + m * 32
+ IF k[i]
+ dst.fp32[i] := dst.fp32[i] + a{m}.fp32[i] * Cast_FP32(MEM[addr+31:addr])
+ FI
+ ENDFOR
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="V4FMADDPS" form="zmm {k}, zmm, m128" xed="V4FMADDPS_ZMMf32_MASKmskw_ZMMf32_MEMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" vexEq="TRUE" name="_mm512_maskz_4fmadd_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512_4FMAPS</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__m512" varname="a0" etype="FP32"/>
+ <parameter type="__m512" varname="a1" etype="FP32"/>
+ <parameter type="__m512" varname="a2" etype="FP32"/>
+ <parameter type="__m512" varname="a3" etype="FP32"/>
+ <parameter type="__m128 *" varname="b" etype="FP32" memwidth="128"/>
+ <description>Multiply packed single-precision (32-bit) floating-point elements specified in 4 consecutive operands "a0" through "a3" by the 4 corresponding packed elements in "b", accumulate with the corresponding elements in "src", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+dst[511:0] := src[511:0]
+FOR i := 0 to 15
+ FOR m := 0 to 3
+ addr := b + m * 32
+ IF k[i]
+ dst.fp32[i] := dst.fp32[i] + a{m}.fp32[i] * Cast_FP32(MEM[addr+31:addr])
+ ELSE
+ dst.fp32[i] := 0
+ FI
+ ENDFOR
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="V4FMADDPS" form="zmm {z}, zmm, m128" xed="V4FMADDPS_ZMMf32_MASKmskw_ZMMf32_MEMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" vexEq="TRUE" name="_mm512_4fnmadd_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512_4FMAPS</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__m512" varname="a0" etype="FP32"/>
+ <parameter type="__m512" varname="a1" etype="FP32"/>
+ <parameter type="__m512" varname="a2" etype="FP32"/>
+ <parameter type="__m512" varname="a3" etype="FP32"/>
+ <parameter type="__m128 *" varname="b" etype="FP32" memwidth="128"/>
+ <description>Multiply packed single-precision (32-bit) floating-point elements specified in 4 consecutive operands "a0" through "a3" by the 4 corresponding packed elements in "b", accumulate the negated intermediate result with the corresponding elements in "src", and store the results in "dst".</description>
+ <operation>
+dst[511:0] := src[511:0]
+FOR i := 0 to 15
+ FOR m := 0 to 3
+ addr := b + m * 32
+ dst.fp32[i] := dst.fp32[i] - a{m}.fp32[i] * Cast_FP32(MEM[addr+31:addr])
+ ENDFOR
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="V4FNMADDPS" form="zmm, zmm, m128" xed="V4FNMADDPS_ZMMf32_MASKmskw_ZMMf32_MEMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" vexEq="TRUE" name="_mm512_mask_4fnmadd_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512_4FMAPS</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a0" etype="FP32"/>
+ <parameter type="__m512" varname="a1" etype="FP32"/>
+ <parameter type="__m512" varname="a2" etype="FP32"/>
+ <parameter type="__m512" varname="a3" etype="FP32"/>
+ <parameter type="__m128 *" varname="b" etype="FP32" memwidth="128"/>
+ <description>Multiply packed single-precision (32-bit) floating-point elements specified in 4 consecutive operands "a0" through "a3" by the 4 corresponding packed elements in "b", accumulate the negated intermediate result with the corresponding elements in "src", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+dst[511:0] := src[511:0]
+FOR i := 0 to 15
+ FOR m := 0 to 3
+ addr := b + m * 32
+ IF k[i]
+ dst.fp32[i] := dst.fp32[i] - a{m}.fp32[i] * Cast_FP32(MEM[addr+31:addr])
+ FI
+ ENDFOR
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="V4FNMADDPS" form="zmm {k}, zmm, m128" xed="V4FNMADDPS_ZMMf32_MASKmskw_ZMMf32_MEMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" vexEq="TRUE" name="_mm512_maskz_4fnmadd_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512_4FMAPS</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__m512" varname="a0" etype="FP32"/>
+ <parameter type="__m512" varname="a1" etype="FP32"/>
+ <parameter type="__m512" varname="a2" etype="FP32"/>
+ <parameter type="__m512" varname="a3" etype="FP32"/>
+ <parameter type="__m128 *" varname="b" etype="FP32" memwidth="128"/>
+ <description>Multiply packed single-precision (32-bit) floating-point elements specified in 4 consecutive operands "a0" through "a3" by the 4 corresponding packed elements in "b", accumulate the negated intermediate result with the corresponding elements in "src", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+dst[511:0] := src[511:0]
+FOR i := 0 to 15
+ FOR m := 0 to 3
+ addr := b + m * 32
+ IF k[i]
+ dst.fp32[i] := dst.fp32[i] - a{m}.fp32[i] * Cast_FP32(MEM[addr+31:addr])
+ ELSE
+ dst.fp32[i] := 0
+ FI
+ ENDFOR
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="V4FNMADDPS" form="zmm {z}, zmm, m128" xed="V4FNMADDPS_ZMMf32_MASKmskw_ZMMf32_MEMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" vexEq="TRUE" name="_mm_4fmadd_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512_4FMAPS</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="src" etype="FP32"/>
+ <parameter type="__m128" varname="a0" etype="FP32"/>
+ <parameter type="__m128" varname="a1" etype="FP32"/>
+ <parameter type="__m128" varname="a2" etype="FP32"/>
+ <parameter type="__m128" varname="a3" etype="FP32"/>
+ <parameter type="__m128 *" varname="b" etype="FP32" memwidth="128"/>
+ <description>Multiply the lower single-precision (32-bit) floating-point elements specified in 4 consecutive operands "a0" through "a3" by corresponding element in "b", accumulate with the lower element in "src", and store the result in the lower element of "dst".</description>
+ <operation>
+dst[127:0] := src[127:0]
+FOR m := 0 to 3
+ addr := b + m * 32
+ dst.fp32[0] := dst.fp32[0] + a{m}.fp32[0] * Cast_FP32(MEM[addr+31:addr])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="V4FMADDSS" form="xmm, xmm, m128" xed="V4FMADDSS_XMMf32_MASKmskw_XMMf32_MEMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" vexEq="TRUE" name="_mm_mask_4fmadd_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512_4FMAPS</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a0" etype="FP32"/>
+ <parameter type="__m128" varname="a1" etype="FP32"/>
+ <parameter type="__m128" varname="a2" etype="FP32"/>
+ <parameter type="__m128" varname="a3" etype="FP32"/>
+ <parameter type="__m128 *" varname="b" etype="FP32" memwidth="128"/>
+ <description>Multiply the lower single-precision (32-bit) floating-point elements specified in 4 consecutive operands "a0" through "a3" by corresponding element in "b", accumulate with the lower element in "src", and store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set).</description>
+ <operation>
+dst[127:0] := src[127:0]
+IF k[0]
+ FOR m := 0 to 3
+ addr := b + m * 32
+ dst.fp32[0] := dst.fp32[0] + a{m}.fp32[0] * Cast_FP32(MEM[addr+31:addr])
+ ENDFOR
+FI
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="V4FMADDSS" form="xmm {k}, xmm, m128" xed="V4FMADDSS_XMMf32_MASKmskw_XMMf32_MEMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" vexEq="TRUE" name="_mm_maskz_4fmadd_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512_4FMAPS</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="src" etype="FP32"/>
+ <parameter type="__m128" varname="a0" etype="FP32"/>
+ <parameter type="__m128" varname="a1" etype="FP32"/>
+ <parameter type="__m128" varname="a2" etype="FP32"/>
+ <parameter type="__m128" varname="a3" etype="FP32"/>
+ <parameter type="__m128 *" varname="b" etype="FP32" memwidth="128"/>
+ <description>Multiply the lower single-precision (32-bit) floating-point elements specified in 4 consecutive operands "a0" through "a3" by corresponding element in "b", accumulate with the lower element in "src", and store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set).</description>
+ <operation>
+dst[127:0] := src[127:0]
+IF k[0]
+ FOR m := 0 to 3
+ addr := b + m * 32
+ dst.fp32[0] := dst.fp32[0] + a{m}.fp32[0] * Cast_FP32(MEM[addr+31:addr])
+ ENDFOR
+ELSE
+ dst.fp32[0] := 0
+FI
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="V4FMADDSS" form="xmm {z}, xmm, m128" xed="V4FMADDSS_XMMf32_MASKmskw_XMMf32_MEMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" vexEq="TRUE" name="_mm_4fnmadd_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512_4FMAPS</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="src" etype="FP32"/>
+ <parameter type="__m128" varname="a0" etype="FP32"/>
+ <parameter type="__m128" varname="a1" etype="FP32"/>
+ <parameter type="__m128" varname="a2" etype="FP32"/>
+ <parameter type="__m128" varname="a3" etype="FP32"/>
+ <parameter type="__m128 *" varname="b" etype="FP32" memwidth="128"/>
+ <description>Multiply the lower single-precision (32-bit) floating-point elements specified in 4 consecutive operands "a0" through "a3" by corresponding element in "b", accumulate the negated intermediate result with the lower element in "src", and store the result in the lower element of "dst".</description>
+ <operation>
+dst[127:0] := src[127:0]
+FOR m := 0 to 3
+ addr := b + m * 32
+ dst.fp32[0] := dst.fp32[0] - a{m}.fp32[0] * Cast_FP32(MEM[addr+31:addr])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="V4FNMADDSS" form="xmm, xmm, m128" xed="V4FNMADDSS_XMMf32_MASKmskw_XMMf32_MEMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" vexEq="TRUE" name="_mm_mask_4fnmadd_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512_4FMAPS</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a0" etype="FP32"/>
+ <parameter type="__m128" varname="a1" etype="FP32"/>
+ <parameter type="__m128" varname="a2" etype="FP32"/>
+ <parameter type="__m128" varname="a3" etype="FP32"/>
+ <parameter type="__m128 *" varname="b" etype="FP32" memwidth="128"/>
+ <description>Multiply the lower single-precision (32-bit) floating-point elements specified in 4 consecutive operands "a0" through "a3" by corresponding element in "b", accumulate the negated intermediate result with the lower element in "src", and store the result in the lower element of "dst" using writemask "k" (the element is copied from "src" when mask bit 0 is not set).</description>
+ <operation>
+dst[127:0] := src[127:0]
+IF k[0]
+ FOR m := 0 to 3
+ addr := b + m * 32
+ dst.fp32[0] := dst.fp32[0] - a{m}.fp32[0] * Cast_FP32(MEM[addr+31:addr])
+ ENDFOR
+FI
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="V4FNMADDSS" form="xmm {k}, xmm, m128" xed="V4FNMADDSS_XMMf32_MASKmskw_XMMf32_MEMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" vexEq="TRUE" name="_mm_maskz_4fnmadd_ss">
+ <type>Floating Point</type>
+ <CPUID>AVX512_4FMAPS</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="src" etype="FP32"/>
+ <parameter type="__m128" varname="a0" etype="FP32"/>
+ <parameter type="__m128" varname="a1" etype="FP32"/>
+ <parameter type="__m128" varname="a2" etype="FP32"/>
+ <parameter type="__m128" varname="a3" etype="FP32"/>
+ <parameter type="__m128 *" varname="b" etype="FP32" memwidth="128"/>
+ <description>Multiply the lower single-precision (32-bit) floating-point elements specified in 4 consecutive operands "a0" through "a3" by corresponding element in "b", accumulate the negated intermediate result with the lower element in "src", and store the result in the lower element of "dst" using zeromask "k" (the element is zeroed out when mask bit 0 is not set).</description>
+ <operation>
+dst[127:0] := src[127:0]
+IF k[0]
+ FOR m := 0 to 3
+ addr := b + m * 32
+ dst.fp32[0] := dst.fp32[0] - a{m}.fp32[0] * Cast_FP32(MEM[addr+31:addr])
+ ENDFOR
+ELSE
+ dst.fp32[0] := 0
+FI
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="V4FNMADDSS" form="xmm {z}, xmm, m128" xed="V4FNMADDSS_XMMf32_MASKmskw_XMMf32_MEMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" vexEq="TRUE" name="_mm512_4dpwssd_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512_4VNNIW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="SI32"/>
+ <parameter type="__m512i" varname="src" etype="SI32"/>
+ <parameter type="__m512i" varname="a0" etype="SI16"/>
+ <parameter type="__m512i" varname="a1" etype="SI16"/>
+ <parameter type="__m512i" varname="a2" etype="SI16"/>
+ <parameter type="__m512i" varname="a3" etype="SI16"/>
+ <parameter type="__m128i *" varname="b" etype="SI16" memwidth="128"/>
+ <description>Compute 4 sequential operand source-block dot-products of two signed 16-bit element operands with 32-bit element accumulation, and store the results in "dst".</description>
+ <operation>
+dst[511:0] := src[511:0]
+FOR i := 0 to 15
+ FOR m := 0 to 3
+ lim_base := b + m*32
+ t.dword := MEM[lim_base+31:lim_base]
+ p1.dword := SignExtend32(a{m}.word[2*i+0]) * SignExtend32(Cast_Int16(t.word[0]))
+ p2.dword := SignExtend32(a{m}.word[2*i+1]) * SignExtend32(Cast_Int16(t.word[1]))
+ dst.dword[i] := dst.dword[i] + p1.dword + p2.dword
+ ENDFOR
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VP4DPWSSD" form="zmm, zmm, m128" xed="VP4DPWSSD_ZMMi32_MASKmskw_ZMMi16_MEMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" vexEq="TRUE" name="_mm512_mask_4dpwssd_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512_4VNNIW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="SI32"/>
+ <parameter type="__m512i" varname="src" etype="SI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a0" etype="SI16"/>
+ <parameter type="__m512i" varname="a1" etype="SI16"/>
+ <parameter type="__m512i" varname="a2" etype="SI16"/>
+ <parameter type="__m512i" varname="a3" etype="SI16"/>
+ <parameter type="__m128i *" varname="b" etype="SI16" memwidth="128"/>
+ <description>Compute 4 sequential operand source-block dot-products of two signed 16-bit element operands with 32-bit element accumulation with mask, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+dst[511:0] := src[511:0]
+FOR i := 0 to 15
+ IF k[i]
+ FOR m := 0 to 3
+ lim_base := b + m*32
+ t.dword := MEM[lim_base+31:lim_base]
+ p1.dword := SignExtend32(a{m}.word[2*i+0]) * SignExtend32(Cast_Int16(t.word[0]))
+ p2.dword := SignExtend32(a{m}.word[2*i+1]) * SignExtend32(Cast_Int16(t.word[1]))
+ dst.dword[i] := dst.dword[i] + p1.dword + p2.dword
+ ENDFOR
+ ELSE
+ dst.dword[i] := src.dword[i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VP4DPWSSD" form="zmm {k}, zmm, m128" xed="VP4DPWSSD_ZMMi32_MASKmskw_ZMMi16_MEMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" vexEq="TRUE" name="_mm512_maskz_4dpwssd_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512_4VNNIW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="SI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="src" etype="SI32"/>
+ <parameter type="__m512i" varname="a0" etype="SI16"/>
+ <parameter type="__m512i" varname="a1" etype="SI16"/>
+ <parameter type="__m512i" varname="a2" etype="SI16"/>
+ <parameter type="__m512i" varname="a3" etype="SI16"/>
+ <parameter type="__m128i *" varname="b" etype="SI16" memwidth="128"/>
+ <description>Compute 4 sequential operand source-block dot-products of two signed 16-bit element operands with 32-bit element accumulation with mask, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+dst[511:0] := src[511:0]
+FOR i := 0 to 15
+ IF k[i]
+ FOR m := 0 to 3
+ lim_base := b + m*32
+ t.dword := MEM[lim_base+31:lim_base]
+ p1.dword := SignExtend32(a{m}.word[2*i+0]) * SignExtend32(Cast_Int16(t.word[0]))
+ p2.dword := SignExtend32(a{m}.word[2*i+1]) * SignExtend32(Cast_Int16(t.word[1]))
+ dst.dword[i] := dst.dword[i] + p1.dword + p2.dword
+ ENDFOR
+ ELSE
+ dst.dword[i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VP4DPWSSD" form="zmm {z}, zmm, m128" xed="VP4DPWSSD_ZMMi32_MASKmskw_ZMMi16_MEMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" vexEq="TRUE" name="_mm512_4dpwssds_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512_4VNNIW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="SI32"/>
+ <parameter type="__m512i" varname="src" etype="SI32"/>
+ <parameter type="__m512i" varname="a0" etype="SI16"/>
+ <parameter type="__m512i" varname="a1" etype="SI16"/>
+ <parameter type="__m512i" varname="a2" etype="SI16"/>
+ <parameter type="__m512i" varname="a3" etype="SI16"/>
+ <parameter type="__m128i *" varname="b" etype="SI16" memwidth="128"/>
+ <description>Compute 4 sequential operand source-block dot-products of two signed 16-bit element operands with 32-bit element accumulation and signed saturation, and store the results in "dst".</description>
+ <operation>
+dst[511:0] := src[511:0]
+FOR i := 0 to 15
+ FOR m := 0 to 3
+ lim_base := b + m*32
+ t.dword := MEM[lim_base+31:lim_base]
+ p1.dword := SignExtend32(a{m}.word[2*i+0]) * SignExtend32(Cast_Int16(t.word[0]))
+ p2.dword := SignExtend32(a{m}.word[2*i+1]) * SignExtend32(Cast_Int16(t.word[1]))
+ dst.dword[i] := Saturate32(dst.dword[i] + p1.dword + p2.dword)
+ ENDFOR
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VP4DPWSSDS" form="zmm, zmm, m128" xed="VP4DPWSSDS_ZMMi32_MASKmskw_ZMMi16_MEMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" vexEq="TRUE" name="_mm512_mask_4dpwssds_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512_4VNNIW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="SI32"/>
+ <parameter type="__m512i" varname="src" etype="SI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a0" etype="SI16"/>
+ <parameter type="__m512i" varname="a1" etype="SI16"/>
+ <parameter type="__m512i" varname="a2" etype="SI16"/>
+ <parameter type="__m512i" varname="a3" etype="SI16"/>
+ <parameter type="__m128i *" varname="b" etype="SI16" memwidth="128"/>
+ <description>Compute 4 sequential operand source-block dot-products of two signed 16-bit element operands with 32-bit element accumulation with mask and signed saturation, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+dst[511:0] := src[511:0]
+FOR i := 0 to 15
+ IF k[i]
+ FOR m := 0 to 3
+ lim_base := b + m*32
+ t.dword := MEM[lim_base+31:lim_base]
+ p1.dword := SignExtend32(a{m}.word[2*i+0]) * SignExtend32(Cast_Int16(t.word[0]))
+ p2.dword := SignExtend32(a{m}.word[2*i+1]) * SignExtend32(Cast_Int16(t.word[1]))
+ dst.dword[i] := Saturate32(dst.dword[i] + p1.dword + p2.dword)
+ ENDFOR
+ ELSE
+ dst.dword[i] := src.dword[i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VP4DPWSSDS" form="zmm {k}, zmm, m128" xed="VP4DPWSSDS_ZMMi32_MASKmskw_ZMMi16_MEMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" vexEq="TRUE" name="_mm512_maskz_4dpwssds_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512_4VNNIW</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="SI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="src" etype="SI32"/>
+ <parameter type="__m512i" varname="a0" etype="SI16"/>
+ <parameter type="__m512i" varname="a1" etype="SI16"/>
+ <parameter type="__m512i" varname="a2" etype="SI16"/>
+ <parameter type="__m512i" varname="a3" etype="SI16"/>
+ <parameter type="__m128i *" varname="b" etype="SI16" memwidth="128"/>
+ <description>Compute 4 sequential operand source-block dot-products of two signed 16-bit element operands with 32-bit element accumulation with mask and signed saturation, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+dst[511:0] := src[511:0]
+FOR i := 0 to 15
+ IF k[i]
+ FOR m := 0 to 3
+ lim_base := b + m*32
+ t.dword := MEM[lim_base+31:lim_base]
+ p1.dword := SignExtend32(a{m}.word[2*i+0]) * SignExtend32(Cast_Int16(t.word[0]))
+ p2.dword := SignExtend32(a{m}.word[2*i+1]) * SignExtend32(Cast_Int16(t.word[1]))
+ dst.dword[i] := Saturate32(dst.dword[i] + p1.dword + p2.dword)
+ ENDFOR
+ ELSE
+ dst.dword[i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VP4DPWSSDS" form="zmm {z}, zmm, m128" xed="VP4DPWSSDS_ZMMi32_MASKmskw_ZMMi16_MEMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cvtne2ps_pbh">
+ <type>Floating Point</type>
+ <CPUID>AVX512_BF16</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Convert</category>
+ <return type="__m128bh" varname="dst" etype="BF16"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in two vectors "a" and "b" to packed BF16 (16-bit) floating-point elements, and store the results in single vector "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ IF j &lt; 4
+ t := b.fp32[j]
+ ELSE
+ t := a.fp32[j-4]
+ FI
+ dst.word[j] := Convert_FP32_To_BF16(t)
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VCVTNE2PS2BF16" form="xmm, xmm, xmm" xed="VCVTNE2PS2BF16_XMMbf16_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cvtne2ps_pbh">
+ <type>Floating Point</type>
+ <CPUID>AVX512_BF16</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Convert</category>
+ <return type="__m128bh" varname="dst" etype="BF16"/>
+ <parameter type="__m128bh" varname="src" etype="BF16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in two vectors "a" and "b" to packed BF16 (16-bit) floating-point elements, and store the results in single vector "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ IF k[j]
+ IF j &lt; 4
+ t := b.fp32[j]
+ ELSE
+ t := a.fp32[j-4]
+ FI
+ dst.word[j] := Convert_FP32_To_BF16(t)
+ ELSE
+ dst.word[j] := src.word[j]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VCVTNE2PS2BF16" form="xmm {k}, xmm, xmm" xed="VCVTNE2PS2BF16_XMMbf16_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_cvtne2ps_pbh">
+ <type>Floating Point</type>
+ <CPUID>AVX512_BF16</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Convert</category>
+ <return type="__m128bh" varname="dst" etype="BF16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in two vectors "a" and "b" to packed BF16 (16-bit) floating-point elements, and store the results in single vector "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ IF k[j]
+ IF j &lt; 4
+ t := b.fp32[j]
+ ELSE
+ t := a.fp32[j-4]
+ FI
+ dst.word[j] := Convert_FP32_To_BF16(t)
+ ELSE
+ dst.word[j] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VCVTNE2PS2BF16" form="xmm {z}, xmm, xmm" xed="VCVTNE2PS2BF16_XMMbf16_MASKmskw_XMMf32_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_cvtne2ps_pbh">
+ <type>Floating Point</type>
+ <CPUID>AVX512_BF16</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Convert</category>
+ <return type="__m256bh" varname="dst" etype="BF16"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="__m256" varname="b" etype="FP32"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in two vectors "a" and "b" to packed BF16 (16-bit) floating-point elements, and store the results in single vector "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ IF j &lt; 8
+ t := b.fp32[j]
+ ELSE
+ t := a.fp32[j-8]
+ FI
+ dst.word[j] := Convert_FP32_To_BF16(t)
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VCVTNE2PS2BF16" form="ymm, ymm, ymm" xed="VCVTNE2PS2BF16_YMMbf16_MASKmskw_YMMf32_YMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cvtne2ps_pbh">
+ <type>Floating Point</type>
+ <CPUID>AVX512_BF16</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Convert</category>
+ <return type="__m256bh" varname="dst" etype="BF16"/>
+ <parameter type="__m256bh" varname="src" etype="BF16"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="__m256" varname="b" etype="FP32"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in two vectors "a" and "b" to packed BF16 (16-bit) floating-point elements, and store the results in single vector "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ IF k[j]
+ IF j &lt; 8
+ t := b.fp32[j]
+ ELSE
+ t := a.fp32[j-8]
+ FI
+ dst.word[j] := Convert_FP32_To_BF16(t)
+ ELSE
+ dst.word[j] := src.word[j]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VCVTNE2PS2BF16" form="ymm {k}, ymm, ymm" xed="VCVTNE2PS2BF16_YMMbf16_MASKmskw_YMMf32_YMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_cvtne2ps_pbh">
+ <type>Floating Point</type>
+ <CPUID>AVX512_BF16</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Convert</category>
+ <return type="__m256bh" varname="dst" etype="BF16"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="__m256" varname="b" etype="FP32"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in two vectors "a" and "b" to packed BF16 (16-bit) floating-point elements, and store the results in single vector "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ IF k[j]
+ IF j &lt; 8
+ t := b.fp32[j]
+ ELSE
+ t := a.fp32[j-8]
+ FI
+ dst.word[j] := Convert_FP32_To_BF16(t)
+ ELSE
+ dst.word[j] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VCVTNE2PS2BF16" form="ymm {z}, ymm, ymm" xed="VCVTNE2PS2BF16_YMMbf16_MASKmskw_YMMf32_YMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_cvtne2ps_pbh">
+ <type>Floating Point</type>
+ <CPUID>AVX512_BF16</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m512bh" varname="dst" etype="BF16"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in two vectors "a" and "b" to packed BF16 (16-bit) floating-point elements, and store the results in single vector "dst".</description>
+ <operation>
+FOR j := 0 to 31
+ IF j &lt; 16
+ t := b.fp32[j]
+ ELSE
+ t := a.fp32[j-16]
+ FI
+ dst.word[j] := Convert_FP32_To_BF16(t)
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCVTNE2PS2BF16" form="zmm, zmm, zmm" xed="VCVTNE2PS2BF16_ZMMbf16_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cvtne2ps_pbh">
+ <type>Floating Point</type>
+ <CPUID>AVX512_BF16</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m512bh" varname="dst" etype="BF16"/>
+ <parameter type="__m512bh" varname="src" etype="BF16"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in two vectors "a" and "b" to packed BF16 (16-bit) floating-point elements, and store the results in single vector "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ IF k[j]
+ IF j &lt; 16
+ t := b.fp32[j]
+ ELSE
+ t := a.fp32[j-16]
+ FI
+ dst.word[j] := Convert_FP32_To_BF16(t)
+ ELSE
+ dst.word[j] := src.word[j]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCVTNE2PS2BF16" form="zmm {k}, zmm, zmm" xed="VCVTNE2PS2BF16_ZMMbf16_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_cvtne2ps_pbh">
+ <type>Floating Point</type>
+ <CPUID>AVX512_BF16</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m512bh" varname="dst" etype="BF16"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in two vectors "a" and "b" to packed BF16 (16-bit) floating-point elements, and store the results in single vector "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ IF k[j]
+ IF j &lt; 16
+ t := b.fp32[j]
+ ELSE
+ t := a.fp32[j-16]
+ FI
+ dst.word[j] := Convert_FP32_To_BF16(t)
+ ELSE
+ dst.word[j] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCVTNE2PS2BF16" form="zmm {z}, zmm, zmm" xed="VCVTNE2PS2BF16_ZMMbf16_MASKmskw_ZMMf32_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_cvtneps_pbh">
+ <type>Floating Point</type>
+ <CPUID>AVX512_BF16</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Convert</category>
+ <return type="__m128bh" varname="dst" etype="BF16"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed BF16 (16-bit) floating-point elements, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ dst.word[j] := Convert_FP32_To_BF16(a.fp32[j])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VCVTNEPS2BF16" form="xmm, xmm" xed="VCVTNEPS2BF16_XMMbf16_MASKmskw_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_cvtneps_pbh">
+ <type>Floating Point</type>
+ <CPUID>AVX512_BF16</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Convert</category>
+ <return type="__m128bh" varname="dst" etype="BF16"/>
+ <parameter type="__m128bh" varname="src" etype="BF16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed BF16 (16-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ IF k[j]
+ dst.word[j] := Convert_FP32_To_BF16(a.fp32[j])
+ ELSE
+ dst.word[j] := src.word[j]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VCVTNEPS2BF16" form="xmm {k}, xmm" xed="VCVTNEPS2BF16_XMMbf16_MASKmskw_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_cvtneps_pbh">
+ <type>Floating Point</type>
+ <CPUID>AVX512_BF16</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Convert</category>
+ <return type="__m128bh" varname="dst" etype="BF16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed BF16 (16-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ IF k[j]
+ dst.word[j] := Convert_FP32_To_BF16(a.fp32[j])
+ ELSE
+ dst.word[j] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VCVTNEPS2BF16" form="xmm {z}, xmm" xed="VCVTNEPS2BF16_XMMbf16_MASKmskw_XMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_cvtneps_pbh">
+ <type>Floating Point</type>
+ <CPUID>AVX512_BF16</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Convert</category>
+ <return type="__m128bh" varname="dst" etype="BF16"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed BF16 (16-bit) floating-point elements, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ dst.word[j] := Convert_FP32_To_BF16(a.fp32[j])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VCVTNEPS2BF16" form="xmm, ymm" xed="VCVTNEPS2BF16_XMMbf16_MASKmskw_YMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_cvtneps_pbh">
+ <type>Floating Point</type>
+ <CPUID>AVX512_BF16</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Convert</category>
+ <return type="__m128bh" varname="dst" etype="BF16"/>
+ <parameter type="__m128bh" varname="src" etype="BF16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed BF16 (16-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ IF k[j]
+ dst.word[j] := Convert_FP32_To_BF16(a.fp32[j])
+ ELSE
+ dst.word[j] := src.word[j]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VCVTNEPS2BF16" form="xmm {k}, ymm" xed="VCVTNEPS2BF16_XMMbf16_MASKmskw_YMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_cvtneps_pbh">
+ <type>Floating Point</type>
+ <CPUID>AVX512_BF16</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Convert</category>
+ <return type="__m128bh" varname="dst" etype="BF16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed BF16 (16-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ IF k[j]
+ dst.word[j] := Convert_FP32_To_BF16(a.fp32[j])
+ ELSE
+ dst.word[j] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VCVTNEPS2BF16" form="xmm {z}, ymm" xed="VCVTNEPS2BF16_XMMbf16_MASKmskw_YMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_cvtneps_pbh">
+ <type>Floating Point</type>
+ <CPUID>AVX512_BF16</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m256bh" varname="dst" etype="BF16"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed BF16 (16-bit) floating-point elements, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ dst.word[j] := Convert_FP32_To_BF16(a.fp32[j])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VCVTNEPS2BF16" form="ymm, zmm" xed="VCVTNEPS2BF16_YMMbf16_MASKmskw_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_cvtneps_pbh">
+ <type>Floating Point</type>
+ <CPUID>AVX512_BF16</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m256bh" varname="dst" etype="BF16"/>
+ <parameter type="__m256bh" varname="src" etype="BF16"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed BF16 (16-bit) floating-point elements, and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ IF k[j]
+ dst.word[j] := Convert_FP32_To_BF16(a.fp32[j])
+ ELSE
+ dst.word[j] := src.word[j]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VCVTNEPS2BF16" form="ymm {k}, zmm" xed="VCVTNEPS2BF16_YMMbf16_MASKmskw_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_cvtneps_pbh">
+ <type>Floating Point</type>
+ <CPUID>AVX512_BF16</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Convert</category>
+ <return type="__m256bh" varname="dst" etype="BF16"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed BF16 (16-bit) floating-point elements, and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ IF k[j]
+ dst.word[j] := Convert_FP32_To_BF16(a.fp32[j])
+ ELSE
+ dst.word[j] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VCVTNEPS2BF16" form="ymm {z}, zmm" xed="VCVTNEPS2BF16_YMMbf16_MASKmskw_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_dpbf16_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512_BF16</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="src" etype="FP32"/>
+ <parameter type="__m128bh" varname="a" etype="BF16"/>
+ <parameter type="__m128bh" varname="b" etype="BF16"/>
+ <description>Compute dot-product of BF16 (16-bit) floating-point pairs in "a" and "b", accumulating the intermediate single-precision (32-bit) floating-point elements with elements in "src", and store the results in "dst".</description>
+ <operation>
+DEFINE make_fp32(x[15:0]) {
+ y.fp32 := 0.0
+ y[31:16] := x[15:0]
+ RETURN y
+}
+dst := src
+FOR j := 0 to 3
+ dst.fp32[j] += make_fp32(a.bf16[2*j+1]) * make_fp32(b.bf16[2*j+1])
+ dst.fp32[j] += make_fp32(a.bf16[2*j+0]) * make_fp32(b.bf16[2*j+0])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VDPBF16PS" form="xmm, xmm, xmm" xed="VDPBF16PS_XMMf32_MASKmskw_XMMu32_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_dpbf16_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512_BF16</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128bh" varname="a" etype="BF16"/>
+ <parameter type="__m128bh" varname="b" etype="BF16"/>
+ <description>Compute dot-product of BF16 (16-bit) floating-point pairs in "a" and "b", accumulating the intermediate single-precision (32-bit) floating-point elements with elements in "src", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE make_fp32(x[15:0]) {
+ y.fp32 := 0.0
+ y[31:16] := x[15:0]
+ RETURN y
+}
+dst := src
+FOR j := 0 to 3
+ IF k[j]
+ dst.fp32[j] += make_fp32(a.bf16[2*j+1]) * make_fp32(b.bf16[2*j+1])
+ dst.fp32[j] += make_fp32(a.bf16[2*j+0]) * make_fp32(b.bf16[2*j+0])
+ ELSE
+ dst.dword[j] := src.dword[j]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VDPBF16PS" form="xmm {k}, xmm, xmm" xed="VDPBF16PS_XMMf32_MASKmskw_XMMu32_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_dpbf16_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512_BF16</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128" varname="src" etype="FP32"/>
+ <parameter type="__m128bh" varname="a" etype="BF16"/>
+ <parameter type="__m128bh" varname="b" etype="BF16"/>
+ <description>Compute dot-product of BF16 (16-bit) floating-point pairs in "a" and "b", accumulating the intermediate single-precision (32-bit) floating-point elements with elements in "src", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE make_fp32(x[15:0]) {
+ y.fp32 := 0.0
+ y[31:16] := x[15:0]
+ RETURN y
+}
+dst := src
+FOR j := 0 to 3
+ IF k[j]
+ dst.fp32[j] += make_fp32(a.bf16[2*j+1]) * make_fp32(b.bf16[2*j+1])
+ dst.fp32[j] += make_fp32(a.bf16[2*j+0]) * make_fp32(b.bf16[2*j+0])
+ ELSE
+ dst.dword[j] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VDPBF16PS" form="xmm {z}, xmm, xmm" xed="VDPBF16PS_XMMf32_MASKmskw_XMMu32_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_dpbf16_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512_BF16</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="src" etype="FP32"/>
+ <parameter type="__m256bh" varname="a" etype="BF16"/>
+ <parameter type="__m256bh" varname="b" etype="BF16"/>
+ <description>Compute dot-product of BF16 (16-bit) floating-point pairs in "a" and "b", accumulating the intermediate single-precision (32-bit) floating-point elements with elements in "src", and store the results in "dst".</description>
+ <operation>
+DEFINE make_fp32(x[15:0]) {
+ y.fp32 := 0.0
+ y[31:16] := x[15:0]
+ RETURN y
+}
+dst := src
+FOR j := 0 to 7
+ dst.fp32[j] += make_fp32(a.bf16[2*j+1]) * make_fp32(b.bf16[2*j+1])
+ dst.fp32[j] += make_fp32(a.bf16[2*j+0]) * make_fp32(b.bf16[2*j+0])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VDPBF16PS" form="ymm, ymm, ymm" xed="VDPBF16PS_YMMf32_MASKmskw_YMMu32_YMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_dpbf16_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512_BF16</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256bh" varname="a" etype="BF16"/>
+ <parameter type="__m256bh" varname="b" etype="BF16"/>
+ <description>Compute dot-product of BF16 (16-bit) floating-point pairs in "a" and "b", accumulating the intermediate single-precision (32-bit) floating-point elements with elements in "src", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE make_fp32(x[15:0]) {
+ y.fp32 := 0.0
+ y[31:16] := x[15:0]
+ RETURN y
+}
+dst := src
+FOR j := 0 to 7
+ IF k[j]
+ dst.fp32[j] += make_fp32(a.bf16[2*j+1]) * make_fp32(b.bf16[2*j+1])
+ dst.fp32[j] += make_fp32(a.bf16[2*j+0]) * make_fp32(b.bf16[2*j+0])
+ ELSE
+ dst.dword[j] := src.dword[j]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VDPBF16PS" form="ymm {k}, ymm, ymm" xed="VDPBF16PS_YMMf32_MASKmskw_YMMu32_YMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_dpbf16_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512_BF16</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256" varname="src" etype="FP32"/>
+ <parameter type="__m256bh" varname="a" etype="BF16"/>
+ <parameter type="__m256bh" varname="b" etype="BF16"/>
+ <description>Compute dot-product of BF16 (16-bit) floating-point pairs in "a" and "b", accumulating the intermediate single-precision (32-bit) floating-point elements with elements in "src", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE make_fp32(x[15:0]) {
+ y.fp32 := 0.0
+ y[31:16] := x[15:0]
+ RETURN y
+}
+dst := src
+FOR j := 0 to 7
+ IF k[j]
+ dst.fp32[j] += make_fp32(a.bf16[2*j+1]) * make_fp32(b.bf16[2*j+1])
+ dst.fp32[j] += make_fp32(a.bf16[2*j+0]) * make_fp32(b.bf16[2*j+0])
+ ELSE
+ dst.dword[j] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VDPBF16PS" form="ymm {z}, ymm, ymm" xed="VDPBF16PS_YMMf32_MASKmskw_YMMu32_YMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_dpbf16_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512_BF16</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__m512bh" varname="a" etype="BF16"/>
+ <parameter type="__m512bh" varname="b" etype="BF16"/>
+ <description>Compute dot-product of BF16 (16-bit) floating-point pairs in "a" and "b", accumulating the intermediate single-precision (32-bit) floating-point elements with elements in "src", and store the results in "dst".</description>
+ <operation>
+DEFINE make_fp32(x[15:0]) {
+ y.fp32 := 0.0
+ y[31:16] := x[15:0]
+ RETURN y
+}
+dst := src
+FOR j := 0 to 15
+ dst.fp32[j] += make_fp32(a.bf16[2*j+1]) * make_fp32(b.bf16[2*j+1])
+ dst.fp32[j] += make_fp32(a.bf16[2*j+0]) * make_fp32(b.bf16[2*j+0])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VDPBF16PS" form="zmm, zmm, zmm" xed="VDPBF16PS_ZMMf32_MASKmskw_ZMMu32_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_dpbf16_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512_BF16</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512bh" varname="a" etype="BF16"/>
+ <parameter type="__m512bh" varname="b" etype="BF16"/>
+ <description>Compute dot-product of BF16 (16-bit) floating-point pairs in "a" and "b", accumulating the intermediate single-precision (32-bit) floating-point elements with elements in "src", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE make_fp32(x[15:0]) {
+ y.fp32 := 0.0
+ y[31:16] := x[15:0]
+ RETURN y
+}
+dst := src
+FOR j := 0 to 15
+ IF k[j]
+ dst.fp32[j] += make_fp32(a.bf16[2*j+1]) * make_fp32(b.bf16[2*j+1])
+ dst.fp32[j] += make_fp32(a.bf16[2*j+0]) * make_fp32(b.bf16[2*j+0])
+ ELSE
+ dst.dword[j] := src.dword[j]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VDPBF16PS" form="zmm {k}, zmm, zmm" xed="VDPBF16PS_ZMMf32_MASKmskw_ZMMu32_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_dpbf16_ps">
+ <type>Floating Point</type>
+ <CPUID>AVX512_BF16</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__m512bh" varname="a" etype="BF16"/>
+ <parameter type="__m512bh" varname="b" etype="BF16"/>
+ <description>Compute dot-product of BF16 (16-bit) floating-point pairs in "a" and "b", accumulating the intermediate single-precision (32-bit) floating-point elements with elements in "src", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE make_fp32(x[15:0]) {
+ y.fp32 := 0.0
+ y[31:16] := x[15:0]
+ RETURN y
+}
+dst := src
+FOR j := 0 to 15
+ IF k[j]
+ dst.fp32[j] += make_fp32(a.bf16[2*j+1]) * make_fp32(b.bf16[2*j+1])
+ dst.fp32[j] += make_fp32(a.bf16[2*j+0]) * make_fp32(b.bf16[2*j+0])
+ ELSE
+ dst.dword[j] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VDPBF16PS" form="zmm {z}, zmm, zmm" xed="VDPBF16PS_ZMMf32_MASKmskw_ZMMu32_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_bitshuffle_epi64_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512_BITALG</CPUID>
+ <category>Bit Manipulation</category>
+ <return type="__mmask64" varname="dst" etype="MASK"/>
+ <parameter type="__mmask64" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="b" etype="UI64"/>
+ <parameter type="__m512i" varname="c" etype="UI64"/>
+	<description>Gather 64 bits from "b" using selection bits in "c". For each 64-bit element in "b", gather 8 bits from the 64-bit element in "b" at the 8 bit positions controlled by the 8 corresponding 8-bit elements of "c", and store the result in the corresponding 8-bit element of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR i := 0 to 7 //Qword
+ FOR j := 0 to 7 // Byte
+ IF k[i*8+j]
+ m := c.qword[i].byte[j] &amp; 0x3F
+ dst[i*8+j] := b.qword[i].bit[m]
+ ELSE
+ dst[i*8+j] := 0
+ FI
+ ENDFOR
+ENDFOR
+dst[MAX:64] := 0
+ </operation>
+ <instruction name="VPSHUFBITQMB" form="k {k}, zmm, zmm" xed="VPSHUFBITQMB_MASKmskw_MASKmskw_ZMMu64_ZMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_bitshuffle_epi64_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512_BITALG</CPUID>
+ <category>Bit Manipulation</category>
+ <return type="__mmask64" varname="dst" etype="MASK"/>
+ <parameter type="__m512i" varname="b" etype="UI64"/>
+ <parameter type="__m512i" varname="c" etype="UI64"/>
+	<description>Gather 64 bits from "b" using selection bits in "c". For each 64-bit element in "b", gather 8 bits from the 64-bit element in "b" at the 8 bit positions controlled by the 8 corresponding 8-bit elements of "c", and store the result in the corresponding 8-bit element of "dst".</description>
+ <operation>
+FOR i := 0 to 7 //Qword
+ FOR j := 0 to 7 // Byte
+ m := c.qword[i].byte[j] &amp; 0x3F
+ dst[i*8+j] := b.qword[i].bit[m]
+ ENDFOR
+ENDFOR
+dst[MAX:64] := 0
+ </operation>
+ <instruction name="VPSHUFBITQMB" form="k, zmm, zmm" xed="VPSHUFBITQMB_MASKmskw_MASKmskw_ZMMu64_ZMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_bitshuffle_epi64_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512_BITALG</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Bit Manipulation</category>
+ <return type="__mmask32" varname="dst" etype="MASK"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="b" etype="UI64"/>
+ <parameter type="__m256i" varname="c" etype="UI64"/>
+	<description>Gather 64 bits from "b" using selection bits in "c". For each 64-bit element in "b", gather 8 bits from the 64-bit element in "b" at the 8 bit positions controlled by the 8 corresponding 8-bit elements of "c", and store the result in the corresponding 8-bit element of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR i := 0 to 3 //Qword
+ FOR j := 0 to 7 // Byte
+ IF k[i*8+j]
+ m := c.qword[i].byte[j] &amp; 0x3F
+ dst[i*8+j] := b.qword[i].bit[m]
+ ELSE
+ dst[i*8+j] := 0
+ FI
+ ENDFOR
+ENDFOR
+dst[MAX:32] := 0
+ </operation>
+ <instruction name="VPSHUFBITQMB" form="k {k}, ymm, ymm" xed="VPSHUFBITQMB_MASKmskw_MASKmskw_YMMu64_YMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_bitshuffle_epi64_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512_BITALG</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Bit Manipulation</category>
+ <return type="__mmask32" varname="dst" etype="MASK"/>
+ <parameter type="__m256i" varname="b" etype="UI64"/>
+ <parameter type="__m256i" varname="c" etype="UI64"/>
+	<description>Gather 64 bits from "b" using selection bits in "c". For each 64-bit element in "b", gather 8 bits from the 64-bit element in "b" at the 8 bit positions controlled by the 8 corresponding 8-bit elements of "c", and store the result in the corresponding 8-bit element of "dst".</description>
+ <operation>
+FOR i := 0 to 3 //Qword
+ FOR j := 0 to 7 // Byte
+ m := c.qword[i].byte[j] &amp; 0x3F
+ dst[i*8+j] := b.qword[i].bit[m]
+ ENDFOR
+ENDFOR
+dst[MAX:32] := 0
+ </operation>
+ <instruction name="VPSHUFBITQMB" form="k, ymm, ymm" xed="VPSHUFBITQMB_MASKmskw_MASKmskw_YMMu64_YMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_bitshuffle_epi64_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512_BITALG</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Bit Manipulation</category>
+ <return type="__mmask16" varname="dst" etype="MASK"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="b" etype="UI64"/>
+ <parameter type="__m128i" varname="c" etype="UI64"/>
+	<description>Gather 64 bits from "b" using selection bits in "c". For each 64-bit element in "b", gather 8 bits from the 64-bit element in "b" at the 8 bit positions controlled by the 8 corresponding 8-bit elements of "c", and store the result in the corresponding 8-bit element of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR i := 0 to 1 //Qword
+ FOR j := 0 to 7 // Byte
+ IF k[i*8+j]
+ m := c.qword[i].byte[j] &amp; 0x3F
+ dst[i*8+j] := b.qword[i].bit[m]
+ ELSE
+ dst[i*8+j] := 0
+ FI
+ ENDFOR
+ENDFOR
+dst[MAX:16] := 0
+ </operation>
+ <instruction name="VPSHUFBITQMB" form="k {k}, xmm, xmm" xed="VPSHUFBITQMB_MASKmskw_MASKmskw_XMMu64_XMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_bitshuffle_epi64_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>AVX512_BITALG</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Bit Manipulation</category>
+ <return type="__mmask16" varname="dst" etype="MASK"/>
+ <parameter type="__m128i" varname="b" etype="UI64"/>
+ <parameter type="__m128i" varname="c" etype="UI64"/>
+	<description>Gather 64 bits from "b" using selection bits in "c". For each 64-bit element in "b", gather 8 bits from the 64-bit element in "b" at the 8 bit positions controlled by the 8 corresponding 8-bit elements of "c", and store the result in the corresponding 8-bit element of "dst".</description>
+ <operation>
+FOR i := 0 to 1 //Qword
+ FOR j := 0 to 7 // Byte
+ m := c.qword[i].byte[j] &amp; 0x3F
+ dst[i*8+j] := b.qword[i].bit[m]
+ ENDFOR
+ENDFOR
+dst[MAX:16] := 0
+ </operation>
+ <instruction name="VPSHUFBITQMB" form="k, xmm, xmm" xed="VPSHUFBITQMB_MASKmskw_MASKmskw_XMMu64_XMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" vexEq="TRUE" name="_mm512_popcnt_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512_BITALG</CPUID>
+ <category>Bit Manipulation</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__m512i" varname="a" etype="UI16"/>
+ <description>Count the number of logical 1 bits in packed 16-bit integers in "a", and store the results in "dst".</description>
+ <operation>
+DEFINE POPCNT(a) {
+ count := 0
+ DO WHILE a &gt; 0
+ count += a[0]
+ a &gt;&gt;= 1
+ OD
+ RETURN count
+}
+FOR j := 0 to 31
+ i := j*16
+ dst[i+15:i] := POPCNT(a[i+15:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPOPCNTW" form="zmm, zmm" xed="VPOPCNTW_ZMMu16_MASKmskw_ZMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" vexEq="TRUE" name="_mm512_mask_popcnt_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512_BITALG</CPUID>
+ <category>Bit Manipulation</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__m512i" varname="src" etype="UI16"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI16"/>
+ <description>Count the number of logical 1 bits in packed 16-bit integers in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE POPCNT(a) {
+ count := 0
+ DO WHILE a &gt; 0
+ count += a[0]
+ a &gt;&gt;= 1
+ OD
+ RETURN count
+}
+FOR j := 0 to 31
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := POPCNT(a[i+15:i])
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPOPCNTW" form="zmm {k}, zmm" xed="VPOPCNTW_ZMMu16_MASKmskw_ZMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" vexEq="TRUE" name="_mm512_maskz_popcnt_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512_BITALG</CPUID>
+ <category>Bit Manipulation</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI16"/>
+ <description>Count the number of logical 1 bits in packed 16-bit integers in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE POPCNT(a) {
+ count := 0
+ DO WHILE a &gt; 0
+ count += a[0]
+ a &gt;&gt;= 1
+ OD
+ RETURN count
+}
+FOR j := 0 to 31
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := POPCNT(a[i+15:i])
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPOPCNTW" form="zmm {z}, zmm" xed="VPOPCNTW_ZMMu16_MASKmskw_ZMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" vexEq="TRUE" name="_mm256_popcnt_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512_BITALG</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Bit Manipulation</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__m256i" varname="a" etype="UI16"/>
+ <description>Count the number of logical 1 bits in packed 16-bit integers in "a", and store the results in "dst".</description>
+ <operation>
+DEFINE POPCNT(a) {
+ count := 0
+ DO WHILE a &gt; 0
+ count += a[0]
+ a &gt;&gt;= 1
+ OD
+ RETURN count
+}
+FOR j := 0 to 15
+ i := j*16
+ dst[i+15:i] := POPCNT(a[i+15:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPOPCNTW" form="ymm, ymm" xed="VPOPCNTW_YMMu16_MASKmskw_YMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" vexEq="TRUE" name="_mm256_mask_popcnt_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512_BITALG</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Bit Manipulation</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__m256i" varname="src" etype="UI16"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI16"/>
+ <description>Count the number of logical 1 bits in packed 16-bit integers in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE POPCNT(a) {
+ count := 0
+ DO WHILE a &gt; 0
+ count += a[0]
+ a &gt;&gt;= 1
+ OD
+ RETURN count
+}
+FOR j := 0 to 15
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := POPCNT(a[i+15:i])
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPOPCNTW" form="ymm {k}, ymm" xed="VPOPCNTW_YMMu16_MASKmskw_YMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" vexEq="TRUE" name="_mm256_maskz_popcnt_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512_BITALG</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Bit Manipulation</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI16"/>
+ <description>Count the number of logical 1 bits in packed 16-bit integers in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE POPCNT(a) {
+ count := 0
+ DO WHILE a &gt; 0
+ count += a[0]
+ a &gt;&gt;= 1
+ OD
+ RETURN count
+}
+FOR j := 0 to 15
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := POPCNT(a[i+15:i])
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPOPCNTW" form="ymm {z}, ymm" xed="VPOPCNTW_YMMu16_MASKmskw_YMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" vexEq="TRUE" name="_mm_popcnt_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512_BITALG</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Bit Manipulation</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <description>Count the number of logical 1 bits in packed 16-bit integers in "a", and store the results in "dst".</description>
+ <operation>
+DEFINE POPCNT(a) {
+ count := 0
+ DO WHILE a &gt; 0
+ count += a[0]
+ a &gt;&gt;= 1
+ OD
+ RETURN count
+}
+FOR j := 0 to 7
+ i := j*16
+ dst[i+15:i] := POPCNT(a[i+15:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPOPCNTW" form="xmm, xmm" xed="VPOPCNTW_XMMu16_MASKmskw_XMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" vexEq="TRUE" name="_mm_mask_popcnt_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512_BITALG</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Bit Manipulation</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__m128i" varname="src" etype="UI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <description>Count the number of logical 1 bits in packed 16-bit integers in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE POPCNT(a) {
+ count := 0
+ DO WHILE a &gt; 0
+ count += a[0]
+ a &gt;&gt;= 1
+ OD
+ RETURN count
+}
+FOR j := 0 to 7
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := POPCNT(a[i+15:i])
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPOPCNTW" form="xmm {k}, xmm" xed="VPOPCNTW_XMMu16_MASKmskw_XMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" vexEq="TRUE" name="_mm_maskz_popcnt_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512_BITALG</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Bit Manipulation</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <description>Count the number of logical 1 bits in packed 16-bit integers in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE POPCNT(a) {
+ count := 0
+ DO WHILE a &gt; 0
+ count += a[0]
+ a &gt;&gt;= 1
+ OD
+ RETURN count
+}
+FOR j := 0 to 7
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := POPCNT(a[i+15:i])
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPOPCNTW" form="xmm {z}, xmm" xed="VPOPCNTW_XMMu16_MASKmskw_XMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" vexEq="TRUE" name="_mm512_popcnt_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512_BITALG</CPUID>
+ <category>Bit Manipulation</category>
+ <return type="__m512i" varname="dst" etype="UI8"/>
+ <parameter type="__m512i" varname="a" etype="UI8"/>
+ <description>Count the number of logical 1 bits in packed 8-bit integers in "a", and store the results in "dst".</description>
+ <operation>
+DEFINE POPCNT(a) {
+ count := 0
+ DO WHILE a &gt; 0
+ count += a[0]
+ a &gt;&gt;= 1
+ OD
+ RETURN count
+}
+FOR j := 0 to 63
+ i := j*8
+ dst[i+7:i] := POPCNT(a[i+7:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPOPCNTB" form="zmm, zmm" xed="VPOPCNTB_ZMMu8_MASKmskw_ZMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" vexEq="TRUE" name="_mm512_mask_popcnt_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512_BITALG</CPUID>
+ <category>Bit Manipulation</category>
+ <return type="__m512i" varname="dst" etype="UI8"/>
+ <parameter type="__m512i" varname="src" etype="UI8"/>
+ <parameter type="__mmask64" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI8"/>
+ <description>Count the number of logical 1 bits in packed 8-bit integers in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE POPCNT(a) {
+ count := 0
+ DO WHILE a &gt; 0
+ count += a[0]
+ a &gt;&gt;= 1
+ OD
+ RETURN count
+}
+FOR j := 0 to 63
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := POPCNT(a[i+7:i])
+ ELSE
+ dst[i+7:i] := src[i+7:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPOPCNTB" form="zmm {k}, zmm" xed="VPOPCNTB_ZMMu8_MASKmskw_ZMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" vexEq="TRUE" name="_mm512_maskz_popcnt_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512_BITALG</CPUID>
+ <category>Bit Manipulation</category>
+ <return type="__m512i" varname="dst" etype="UI8"/>
+ <parameter type="__mmask64" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI8"/>
+ <description>Count the number of logical 1 bits in packed 8-bit integers in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE POPCNT(a) {
+ count := 0
+ DO WHILE a &gt; 0
+ count += a[0]
+ a &gt;&gt;= 1
+ OD
+ RETURN count
+}
+FOR j := 0 to 63
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := POPCNT(a[i+7:i])
+ ELSE
+ dst[i+7:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPOPCNTB" form="zmm {z}, zmm" xed="VPOPCNTB_ZMMu8_MASKmskw_ZMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" vexEq="TRUE" name="_mm256_popcnt_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512_BITALG</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Bit Manipulation</category>
+ <return type="__m256i" varname="dst" etype="UI8"/>
+ <parameter type="__m256i" varname="a" etype="UI8"/>
+ <description>Count the number of logical 1 bits in packed 8-bit integers in "a", and store the results in "dst".</description>
+ <operation>
+DEFINE POPCNT(a) {
+ count := 0
+ DO WHILE a &gt; 0
+ count += a[0]
+ a &gt;&gt;= 1
+ OD
+ RETURN count
+}
+FOR j := 0 to 31
+ i := j*8
+ dst[i+7:i] := POPCNT(a[i+7:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPOPCNTB" form="ymm, ymm" xed="VPOPCNTB_YMMu8_MASKmskw_YMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" vexEq="TRUE" name="_mm256_mask_popcnt_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512_BITALG</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Bit Manipulation</category>
+ <return type="__m256i" varname="dst" etype="UI8"/>
+ <parameter type="__m256i" varname="src" etype="UI8"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI8"/>
+ <description>Count the number of logical 1 bits in packed 8-bit integers in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE POPCNT(a) {
+ count := 0
+ DO WHILE a &gt; 0
+ count += a[0]
+ a &gt;&gt;= 1
+ OD
+ RETURN count
+}
+FOR j := 0 to 31
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := POPCNT(a[i+7:i])
+ ELSE
+ dst[i+7:i] := src[i+7:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPOPCNTB" form="ymm {k}, ymm" xed="VPOPCNTB_YMMu8_MASKmskw_YMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" vexEq="TRUE" name="_mm256_maskz_popcnt_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512_BITALG</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Bit Manipulation</category>
+ <return type="__m256i" varname="dst" etype="UI8"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI8"/>
+ <description>Count the number of logical 1 bits in packed 8-bit integers in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE POPCNT(a) {
+ count := 0
+ DO WHILE a &gt; 0
+ count += a[0]
+ a &gt;&gt;= 1
+ OD
+ RETURN count
+}
+FOR j := 0 to 31
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := POPCNT(a[i+7:i])
+ ELSE
+ dst[i+7:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPOPCNTB" form="ymm {z}, ymm" xed="VPOPCNTB_YMMu8_MASKmskw_YMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" vexEq="TRUE" name="_mm_popcnt_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512_BITALG</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Bit Manipulation</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
+ <description>Count the number of logical 1 bits in packed 8-bit integers in "a", and store the results in "dst".</description>
+ <operation>
+DEFINE POPCNT(a) {
+ count := 0
+ DO WHILE a &gt; 0
+ count += a[0]
+ a &gt;&gt;= 1
+ OD
+ RETURN count
+}
+FOR j := 0 to 15
+ i := j*8
+ dst[i+7:i] := POPCNT(a[i+7:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPOPCNTB" form="xmm, xmm" xed="VPOPCNTB_XMMu8_MASKmskw_XMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" vexEq="TRUE" name="_mm_mask_popcnt_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512_BITALG</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Bit Manipulation</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__m128i" varname="src" etype="UI8"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
+ <description>Count the number of logical 1 bits in packed 8-bit integers in "a", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE POPCNT(a) {
+ count := 0
+ DO WHILE a &gt; 0
+ count += a[0]
+ a &gt;&gt;= 1
+ OD
+ RETURN count
+}
+FOR j := 0 to 15
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := POPCNT(a[i+7:i])
+ ELSE
+ dst[i+7:i] := src[i+7:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPOPCNTB" form="xmm {k}, xmm" xed="VPOPCNTB_XMMu8_MASKmskw_XMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" vexEq="TRUE" name="_mm_maskz_popcnt_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512_BITALG</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Bit Manipulation</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
+ <description>Count the number of logical 1 bits in packed 8-bit integers in "a", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE POPCNT(a) {
+ count := 0
+ DO WHILE a &gt; 0
+ count += a[0]
+ a &gt;&gt;= 1
+ OD
+ RETURN count
+}
+FOR j := 0 to 15
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := POPCNT(a[i+7:i])
+ ELSE
+ dst[i+7:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPOPCNTB" form="xmm {z}, xmm" xed="VPOPCNTB_XMMu8_MASKmskw_XMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_multishift_epi64_epi8">
+ <CPUID>AVX512_VBMI</CPUID>
+ <category>Bit Manipulation</category>
+ <return type="__m512i" varname="dst" etype="UI8"/>
+ <parameter type="__m512i" varname="a" etype="UI8"/>
+ <parameter type="__m512i" varname="b" etype="UI8"/>
+ <description>For each 64-bit element in "b", select 8 unaligned bytes using a byte-granular shift control within the corresponding 64-bit element of "a", and store the 8 assembled bytes to the corresponding 64-bit element of "dst".</description>
+ <operation>
+FOR i := 0 to 7
+ q := i * 64
+ FOR j := 0 to 7
+ tmp8 := 0
+ ctrl := a[q+j*8+7:q+j*8] &amp; 63
+ FOR l := 0 to 7
+ tmp8[l] := b[q+((ctrl+l) &amp; 63)]
+ ENDFOR
+ dst[q+j*8+7:q+j*8] := tmp8[7:0]
+ ENDFOR
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMULTISHIFTQB" form="zmm, zmm, zmm" xed="VPMULTISHIFTQB_ZMMu8_MASKmskw_ZMMu8_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_multishift_epi64_epi8">
+ <CPUID>AVX512_VBMI</CPUID>
+ <category>Bit Manipulation</category>
+ <return type="__m512i" varname="dst" etype="UI8"/>
+ <parameter type="__m512i" varname="src" etype="UI8"/>
+ <parameter type="__mmask64" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI8"/>
+ <parameter type="__m512i" varname="b" etype="UI8"/>
+ <description>For each 64-bit element in "b", select 8 unaligned bytes using a byte-granular shift control within the corresponding 64-bit element of "a", and store the 8 assembled bytes to the corresponding 64-bit element of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR i := 0 to 7
+ q := i * 64
+ FOR j := 0 to 7
+ tmp8 := 0
+ ctrl := a[q+j*8+7:q+j*8] &amp; 63
+ FOR l := 0 to 7
+ tmp8[l] := b[q+((ctrl+l) &amp; 63)]
+ ENDFOR
+ IF k[i*8+j]
+ dst[q+j*8+7:q+j*8] := tmp8[7:0]
+ ELSE
+ dst[q+j*8+7:q+j*8] := src[q+j*8+7:q+j*8]
+ FI
+ ENDFOR
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMULTISHIFTQB" form="zmm {k}, zmm, zmm" xed="VPMULTISHIFTQB_ZMMu8_MASKmskw_ZMMu8_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_multishift_epi64_epi8">
+ <CPUID>AVX512_VBMI</CPUID>
+ <category>Bit Manipulation</category>
+ <return type="__m512i" varname="dst" etype="UI8"/>
+ <parameter type="__mmask64" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI8"/>
+ <parameter type="__m512i" varname="b" etype="UI8"/>
+ <description>For each 64-bit element in "b", select 8 unaligned bytes using a byte-granular shift control within the corresponding 64-bit element of "a", and store the 8 assembled bytes to the corresponding 64-bit element of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR i := 0 to 7
+ q := i * 64
+ FOR j := 0 to 7
+ tmp8 := 0
+ ctrl := a[q+j*8+7:q+j*8] &amp; 63
+ FOR l := 0 to 7
+ tmp8[l] := b[q+((ctrl+l) &amp; 63)]
+ ENDFOR
+ IF k[i*8+j]
+ dst[q+j*8+7:q+j*8] := tmp8[7:0]
+ ELSE
+ dst[q+j*8+7:q+j*8] := 0
+ FI
+ ENDFOR
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMULTISHIFTQB" form="zmm {z}, zmm, zmm" xed="VPMULTISHIFTQB_ZMMu8_MASKmskw_ZMMu8_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_multishift_epi64_epi8">
+ <CPUID>AVX512_VBMI</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Bit Manipulation</category>
+ <return type="__m256i" varname="dst" etype="UI8"/>
+ <parameter type="__m256i" varname="a" etype="UI8"/>
+ <parameter type="__m256i" varname="b" etype="UI8"/>
+ <description>For each 64-bit element in "b", select 8 unaligned bytes using a byte-granular shift control within the corresponding 64-bit element of "a", and store the 8 assembled bytes to the corresponding 64-bit element of "dst".</description>
+ <operation>
+FOR i := 0 to 3
+ q := i * 64
+ FOR j := 0 to 7
+ tmp8 := 0
+ ctrl := a[q+j*8+7:q+j*8] &amp; 63
+ FOR l := 0 to 7
+ tmp8[l] := b[q+((ctrl+l) &amp; 63)]
+ ENDFOR
+ dst[q+j*8+7:q+j*8] := tmp8[7:0]
+ ENDFOR
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMULTISHIFTQB" form="ymm, ymm, ymm" xed="VPMULTISHIFTQB_YMMu8_MASKmskw_YMMu8_YMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_multishift_epi64_epi8">
+ <CPUID>AVX512_VBMI</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Bit Manipulation</category>
+ <return type="__m256i" varname="dst" etype="UI8"/>
+ <parameter type="__m256i" varname="src" etype="UI8"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI8"/>
+ <parameter type="__m256i" varname="b" etype="UI8"/>
+ <description>For each 64-bit element in "b", select 8 unaligned bytes using a byte-granular shift control within the corresponding 64-bit element of "a", and store the 8 assembled bytes to the corresponding 64-bit element of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR i := 0 to 3
+ q := i * 64
+ FOR j := 0 to 7
+ tmp8 := 0
+ ctrl := a[q+j*8+7:q+j*8] &amp; 63
+ FOR l := 0 to 7
+ tmp8[l] := b[q+((ctrl+l) &amp; 63)]
+ ENDFOR
+ IF k[i*8+j]
+ dst[q+j*8+7:q+j*8] := tmp8[7:0]
+ ELSE
+ dst[q+j*8+7:q+j*8] := src[q+j*8+7:q+j*8]
+ FI
+ ENDFOR
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMULTISHIFTQB" form="ymm {k}, ymm, ymm" xed="VPMULTISHIFTQB_YMMu8_MASKmskw_YMMu8_YMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_multishift_epi64_epi8">
+ <CPUID>AVX512_VBMI</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Bit Manipulation</category>
+ <return type="__m256i" varname="dst" etype="UI8"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI8"/>
+ <parameter type="__m256i" varname="b" etype="UI8"/>
+ <description>For each 64-bit element in "b", select 8 unaligned bytes using a byte-granular shift control within the corresponding 64-bit element of "a", and store the 8 assembled bytes to the corresponding 64-bit element of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR i := 0 to 3
+ q := i * 64
+ FOR j := 0 to 7
+ tmp8 := 0
+ ctrl := a[q+j*8+7:q+j*8] &amp; 63
+ FOR l := 0 to 7
+ tmp8[l] := b[q+((ctrl+l) &amp; 63)]
+ ENDFOR
+ IF k[i*8+j]
+ dst[q+j*8+7:q+j*8] := tmp8[7:0]
+ ELSE
+ dst[q+j*8+7:q+j*8] := 0
+ FI
+ ENDFOR
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPMULTISHIFTQB" form="ymm {z}, ymm, ymm" xed="VPMULTISHIFTQB_YMMu8_MASKmskw_YMMu8_YMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_multishift_epi64_epi8">
+ <CPUID>AVX512_VBMI</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Bit Manipulation</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
+ <parameter type="__m128i" varname="b" etype="UI8"/>
+ <description>For each 64-bit element in "b", select 8 unaligned bytes using a byte-granular shift control within the corresponding 64-bit element of "a", and store the 8 assembled bytes to the corresponding 64-bit element of "dst".</description>
+ <operation>
+FOR i := 0 to 1
+ q := i * 64
+ FOR j := 0 to 7
+ tmp8 := 0
+ ctrl := a[q+j*8+7:q+j*8] &amp; 63
+ FOR l := 0 to 7
+ tmp8[l] := b[q+((ctrl+l) &amp; 63)]
+ ENDFOR
+ dst[q+j*8+7:q+j*8] := tmp8[7:0]
+ ENDFOR
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMULTISHIFTQB" form="xmm, xmm, xmm" xed="VPMULTISHIFTQB_XMMu8_MASKmskw_XMMu8_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_multishift_epi64_epi8">
+ <CPUID>AVX512_VBMI</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Bit Manipulation</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__m128i" varname="src" etype="UI8"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
+ <parameter type="__m128i" varname="b" etype="UI8"/>
+ <description>For each 64-bit element in "b", select 8 unaligned bytes using a byte-granular shift control within the corresponding 64-bit element of "a", and store the 8 assembled bytes to the corresponding 64-bit element of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR i := 0 to 1
+ q := i * 64
+ FOR j := 0 to 7
+ tmp8 := 0
+ ctrl := a[q+j*8+7:q+j*8] &amp; 63
+ FOR l := 0 to 7
+ tmp8[l] := b[q+((ctrl+l) &amp; 63)]
+ ENDFOR
+ IF k[i*8+j]
+ dst[q+j*8+7:q+j*8] := tmp8[7:0]
+ ELSE
+ dst[q+j*8+7:q+j*8] := src[q+j*8+7:q+j*8]
+ FI
+ ENDFOR
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMULTISHIFTQB" form="xmm {k}, xmm, xmm" xed="VPMULTISHIFTQB_XMMu8_MASKmskw_XMMu8_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_multishift_epi64_epi8">
+ <CPUID>AVX512_VBMI</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Bit Manipulation</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
+ <parameter type="__m128i" varname="b" etype="UI8"/>
+ <description>For each 64-bit element in "b", select 8 unaligned bytes using a byte-granular shift control within the corresponding 64-bit element of "a", and store the 8 assembled bytes to the corresponding 64-bit element of "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR i := 0 to 1
+ q := i * 64
+ FOR j := 0 to 7
+ tmp8 := 0
+ ctrl := a[q+j*8+7:q+j*8] &amp; 63
+ FOR l := 0 to 7
+ tmp8[l] := b[q+((ctrl+l) &amp; 63)]
+ ENDFOR
+ IF k[i*8+j]
+ dst[q+j*8+7:q+j*8] := tmp8[7:0]
+ ELSE
+ dst[q+j*8+7:q+j*8] := 0
+ FI
+ ENDFOR
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPMULTISHIFTQB" form="xmm {z}, xmm, xmm" xed="VPMULTISHIFTQB_XMMu8_MASKmskw_XMMu8_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_permutexvar_epi8">
+ <CPUID>AVX512_VBMI</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512i" varname="dst" etype="UI8"/>
+ <parameter type="__m512i" varname="idx" etype="UI8"/>
+ <parameter type="__m512i" varname="a" etype="UI8"/>
+ <description>Shuffle 8-bit integers in "a" across lanes using the corresponding index in "idx", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 63
+ i := j*8
+ id := idx[i+5:i]*8
+ dst[i+7:i] := a[id+7:id]
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPERMB" form="zmm, zmm, zmm" xed="VPERMB_ZMMu8_MASKmskw_ZMMu8_ZMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_permutexvar_epi8">
+ <CPUID>AVX512_VBMI</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512i" varname="dst" etype="UI8"/>
+ <parameter type="__m512i" varname="src" etype="UI8"/>
+ <parameter type="__mmask64" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="idx" etype="UI8"/>
+ <parameter type="__m512i" varname="a" etype="UI8"/>
+ <description>Shuffle 8-bit integers in "a" across lanes using the corresponding index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 63
+ i := j*8
+ id := idx[i+5:i]*8
+ IF k[j]
+ dst[i+7:i] := a[id+7:id]
+ ELSE
+ dst[i+7:i] := src[i+7:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPERMB" form="zmm {k}, zmm, zmm" xed="VPERMB_ZMMu8_MASKmskw_ZMMu8_ZMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_permutexvar_epi8">
+ <CPUID>AVX512_VBMI</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512i" varname="dst" etype="UI8"/>
+ <parameter type="__mmask64" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="idx" etype="UI8"/>
+ <parameter type="__m512i" varname="a" etype="UI8"/>
+ <description>Shuffle 8-bit integers in "a" across lanes using the corresponding index in "idx", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 63
+ i := j*8
+ id := idx[i+5:i]*8
+ IF k[j]
+ dst[i+7:i] := a[id+7:id]
+ ELSE
+ dst[i+7:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPERMB" form="zmm {z}, zmm, zmm" xed="VPERMB_ZMMu8_MASKmskw_ZMMu8_ZMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_permutexvar_epi8">
+ <CPUID>AVX512_VBMI</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Swizzle</category>
+ <return type="__m256i" varname="dst" etype="UI8"/>
+ <parameter type="__m256i" varname="idx" etype="UI8"/>
+ <parameter type="__m256i" varname="a" etype="UI8"/>
+ <description>Shuffle 8-bit integers in "a" across lanes using the corresponding index in "idx", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*8
+ id := idx[i+4:i]*8
+ dst[i+7:i] := a[id+7:id]
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPERMB" form="ymm, ymm, ymm" xed="VPERMB_YMMu8_MASKmskw_YMMu8_YMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_permutexvar_epi8">
+ <CPUID>AVX512_VBMI</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Swizzle</category>
+ <return type="__m256i" varname="dst" etype="UI8"/>
+ <parameter type="__m256i" varname="src" etype="UI8"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="idx" etype="UI8"/>
+ <parameter type="__m256i" varname="a" etype="UI8"/>
+ <description>Shuffle 8-bit integers in "a" across lanes using the corresponding index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*8
+ id := idx[i+4:i]*8
+ IF k[j]
+ dst[i+7:i] := a[id+7:id]
+ ELSE
+ dst[i+7:i] := src[i+7:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPERMB" form="ymm {k}, ymm, ymm" xed="VPERMB_YMMu8_MASKmskw_YMMu8_YMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_permutexvar_epi8">
+ <CPUID>AVX512_VBMI</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Swizzle</category>
+ <return type="__m256i" varname="dst" etype="UI8"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="idx" etype="UI8"/>
+ <parameter type="__m256i" varname="a" etype="UI8"/>
+ <description>Shuffle 8-bit integers in "a" across lanes using the corresponding index in "idx", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*8
+ id := idx[i+4:i]*8
+ IF k[j]
+ dst[i+7:i] := a[id+7:id]
+ ELSE
+ dst[i+7:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPERMB" form="ymm {z}, ymm, ymm" xed="VPERMB_YMMu8_MASKmskw_YMMu8_YMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_permutexvar_epi8">
+ <CPUID>AVX512_VBMI</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Swizzle</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__m128i" varname="idx" etype="UI8"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
+ <description>Shuffle 8-bit integers in "a" using the corresponding index in "idx", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*8
+ id := idx[i+3:i]*8
+ dst[i+7:i] := a[id+7:id]
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPERMB" form="xmm, xmm, xmm" xed="VPERMB_XMMu8_MASKmskw_XMMu8_XMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_permutexvar_epi8">
+ <CPUID>AVX512_VBMI</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Swizzle</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__m128i" varname="src" etype="UI8"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="idx" etype="UI8"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
+ <description>Shuffle 8-bit integers in "a" using the corresponding index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*8
+ id := idx[i+3:i]*8
+ IF k[j]
+ dst[i+7:i] := a[id+7:id]
+ ELSE
+ dst[i+7:i] := src[i+7:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPERMB" form="xmm {k}, xmm, xmm" xed="VPERMB_XMMu8_MASKmskw_XMMu8_XMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_permutexvar_epi8">
+ <CPUID>AVX512_VBMI</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Swizzle</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="idx" etype="UI8"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
+ <description>Shuffle 8-bit integers in "a" using the corresponding index in "idx", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*8
+ id := idx[i+3:i]*8
+ IF k[j]
+ dst[i+7:i] := a[id+7:id]
+ ELSE
+ dst[i+7:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPERMB" form="xmm {z}, xmm, xmm" xed="VPERMB_XMMu8_MASKmskw_XMMu8_XMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_permutex2var_epi8">
+ <CPUID>AVX512_VBMI</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512i" varname="dst" etype="UI8"/>
+ <parameter type="__m512i" varname="a" etype="UI8"/>
+ <parameter type="__m512i" varname="idx" etype="UI8"/>
+ <parameter type="__m512i" varname="b" etype="UI8"/>
+ <description>Shuffle 8-bit integers in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 63
+ i := j*8
+ off := 8*idx[i+5:i]
+ dst[i+7:i] := idx[i+6] ? b[off+7:off] : a[off+7:off]
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPERMI2B" form="zmm, zmm, zmm" xed="VPERMI2B_ZMMu8_MASKmskw_ZMMu8_ZMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_permutex2var_epi8">
+ <CPUID>AVX512_VBMI</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512i" varname="dst" etype="UI8"/>
+ <parameter type="__m512i" varname="a" etype="UI8"/>
+ <parameter type="__mmask64" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="idx" etype="UI8"/>
+ <parameter type="__m512i" varname="b" etype="UI8"/>
+ <description>Shuffle 8-bit integers in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 63
+ i := j*8
+ IF k[j]
+ off := 8*idx[i+5:i]
+ dst[i+7:i] := idx[i+6] ? b[off+7:off] : a[off+7:off]
+ ELSE
+ dst[i+7:i] := a[i+7:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPERMT2B" form="zmm {k}, zmm, zmm" xed="VPERMT2B_ZMMu8_MASKmskw_ZMMu8_ZMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask2_permutex2var_epi8">
+ <CPUID>AVX512_VBMI</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512i" varname="dst" etype="UI8"/>
+ <parameter type="__m512i" varname="a" etype="UI8"/>
+ <parameter type="__m512i" varname="idx" etype="UI8"/>
+ <parameter type="__mmask64" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="b" etype="UI8"/>
+ <description>Shuffle 8-bit integers in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 63
+ i := j*8
+ IF k[j]
+ off := 8*idx[i+5:i]
+ dst[i+7:i] := idx[i+6] ? b[off+7:off] : a[off+7:off]
+ ELSE
+ dst[i+7:i] := idx[i+7:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPERMI2B" form="zmm {k}, zmm, zmm" xed="VPERMI2B_ZMMu8_MASKmskw_ZMMu8_ZMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_permutex2var_epi8">
+ <CPUID>AVX512_VBMI</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512i" varname="dst" etype="UI8"/>
+ <parameter type="__mmask64" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI8"/>
+ <parameter type="__m512i" varname="idx" etype="UI8"/>
+ <parameter type="__m512i" varname="b" etype="UI8"/>
+ <description>Shuffle 8-bit integers in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 63
+ i := j*8
+ IF k[j]
+ off := 8*idx[i+5:i]
+ dst[i+7:i] := idx[i+6] ? b[off+7:off] : a[off+7:off]
+ ELSE
+ dst[i+7:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPERMI2B" form="zmm {z}, zmm, zmm" xed="VPERMI2B_ZMMu8_MASKmskw_ZMMu8_ZMMu8_AVX512"/>
+ <instruction name="VPERMT2B" form="zmm {z}, zmm, zmm" xed="VPERMT2B_ZMMu8_MASKmskw_ZMMu8_ZMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_permutex2var_epi8">
+ <CPUID>AVX512_VBMI</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Swizzle</category>
+ <return type="__m256i" varname="dst" etype="UI8"/>
+ <parameter type="__m256i" varname="a" etype="UI8"/>
+ <parameter type="__m256i" varname="idx" etype="UI8"/>
+ <parameter type="__m256i" varname="b" etype="UI8"/>
+ <description>Shuffle 8-bit integers in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*8
+ off := 8*idx[i+4:i]
+ dst[i+7:i] := idx[i+5] ? b[off+7:off] : a[off+7:off]
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPERMI2B" form="ymm, ymm, ymm" xed="VPERMI2B_YMMu8_MASKmskw_YMMu8_YMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_permutex2var_epi8">
+ <CPUID>AVX512_VBMI</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Swizzle</category>
+ <return type="__m256i" varname="dst" etype="UI8"/>
+ <parameter type="__m256i" varname="a" etype="UI8"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="idx" etype="UI8"/>
+ <parameter type="__m256i" varname="b" etype="UI8"/>
+ <description>Shuffle 8-bit integers in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*8
+ IF k[j]
+ off := 8*idx[i+4:i]
+ dst[i+7:i] := idx[i+5] ? b[off+7:off] : a[off+7:off]
+ ELSE
+ dst[i+7:i] := a[i+7:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPERMT2B" form="ymm {k}, ymm, ymm" xed="VPERMT2B_YMMu8_MASKmskw_YMMu8_YMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask2_permutex2var_epi8">
+ <CPUID>AVX512_VBMI</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Swizzle</category>
+ <return type="__m256i" varname="dst" etype="UI8"/>
+ <parameter type="__m256i" varname="a" etype="UI8"/>
+ <parameter type="__m256i" varname="idx" etype="UI8"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="b" etype="UI8"/>
+ <description>Shuffle 8-bit integers in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*8
+ IF k[j]
+ off := 8*idx[i+4:i]
+ dst[i+7:i] := idx[i+5] ? b[off+7:off] : a[off+7:off]
+ ELSE
+ dst[i+7:i] := idx[i+7:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPERMI2B" form="ymm {k}, ymm, ymm" xed="VPERMI2B_YMMu8_MASKmskw_YMMu8_YMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_permutex2var_epi8">
+ <CPUID>AVX512_VBMI</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Swizzle</category>
+ <return type="__m256i" varname="dst" etype="UI8"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI8"/>
+ <parameter type="__m256i" varname="idx" etype="UI8"/>
+ <parameter type="__m256i" varname="b" etype="UI8"/>
+ <description>Shuffle 8-bit integers in "a" and "b" across lanes using the corresponding selector and index in "idx", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*8
+ IF k[j]
+ off := 8*idx[i+4:i]
+ dst[i+7:i] := idx[i+5] ? b[off+7:off] : a[off+7:off]
+ ELSE
+ dst[i+7:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPERMI2B" form="ymm {z}, ymm, ymm" xed="VPERMI2B_YMMu8_MASKmskw_YMMu8_YMMu8_AVX512"/>
+ <instruction name="VPERMT2B" form="ymm {z}, ymm, ymm" xed="VPERMT2B_YMMu8_MASKmskw_YMMu8_YMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_permutex2var_epi8">
+ <CPUID>AVX512_VBMI</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Swizzle</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
+ <parameter type="__m128i" varname="idx" etype="UI8"/>
+ <parameter type="__m128i" varname="b" etype="UI8"/>
+ <description>Shuffle 8-bit integers in "a" and "b" using the corresponding selector and index in "idx", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*8
+ off := 8*idx[i+3:i]
+ dst[i+7:i] := idx[i+4] ? b[off+7:off] : a[off+7:off]
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPERMI2B" form="xmm, xmm, xmm" xed="VPERMI2B_XMMu8_MASKmskw_XMMu8_XMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_permutex2var_epi8">
+ <CPUID>AVX512_VBMI</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Swizzle</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="idx" etype="UI8"/>
+ <parameter type="__m128i" varname="b" etype="UI8"/>
+ <description>Shuffle 8-bit integers in "a" and "b" using the corresponding selector and index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*8
+ IF k[j]
+ off := 8*idx[i+3:i]
+ dst[i+7:i] := idx[i+4] ? b[off+7:off] : a[off+7:off]
+ ELSE
+ dst[i+7:i] := a[i+7:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPERMT2B" form="xmm {k}, xmm, xmm" xed="VPERMT2B_XMMu8_MASKmskw_XMMu8_XMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask2_permutex2var_epi8">
+ <CPUID>AVX512_VBMI</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Swizzle</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
+ <parameter type="__m128i" varname="idx" etype="UI8"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="b" etype="UI8"/>
+ <description>Shuffle 8-bit integers in "a" and "b" using the corresponding selector and index in "idx", and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*8
+ IF k[j]
+ off := 8*idx[i+3:i]
+ dst[i+7:i] := idx[i+4] ? b[off+7:off] : a[off+7:off]
+ ELSE
+ dst[i+7:i] := idx[i+7:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPERMI2B" form="xmm {k}, xmm, xmm" xed="VPERMI2B_XMMu8_MASKmskw_XMMu8_XMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_permutex2var_epi8">
+ <CPUID>AVX512_VBMI</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Swizzle</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
+ <parameter type="__m128i" varname="idx" etype="UI8"/>
+ <parameter type="__m128i" varname="b" etype="UI8"/>
+ <description>Shuffle 8-bit integers in "a" and "b" using the corresponding selector and index in "idx", and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*8
+ IF k[j]
+ off := 8*idx[i+3:i]
+ dst[i+7:i] := idx[i+4] ? b[off+7:off] : a[off+7:off]
+ ELSE
+ dst[i+7:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPERMI2B" form="xmm {z}, xmm, xmm" xed="VPERMI2B_XMMu8_MASKmskw_XMMu8_XMMu8_AVX512"/>
+ <instruction name="VPERMT2B" form="xmm {z}, xmm, xmm" xed="VPERMT2B_XMMu8_MASKmskw_XMMu8_XMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_shrdv_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="__m512i" varname="b" etype="UI64"/>
+ <parameter type="__m512i" varname="c" etype="UI64"/>
+ <description>Concatenate packed 64-bit integers in "b" and "a" producing an intermediate 128-bit result. Shift the result right by the amount specified in the corresponding element of "c", and store the lower 64-bits in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := ((b[i+63:i] &lt;&lt; 64)[127:0] | a[i+63:i]) &gt;&gt; (c[i+63:i] &amp; 63)
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSHRDVQ" form="zmm {z}, zmm, zmm" xed="VPSHRDVQ_ZMMu64_MASKmskw_ZMMu64_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_shrdv_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="b" etype="UI64"/>
+ <parameter type="__m512i" varname="c" etype="UI64"/>
+ <description>Concatenate packed 64-bit integers in "b" and "a" producing an intermediate 128-bit result. Shift the result right by the amount specified in the corresponding element of "c", and store the lower 64-bits in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := ((b[i+63:i] &lt;&lt; 64)[127:0] | a[i+63:i]) &gt;&gt; (c[i+63:i] &amp; 63)
+ ELSE
+ dst[i+63:i] := a[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSHRDVQ" form="zmm {k}, zmm, zmm" xed="VPSHRDVQ_ZMMu64_MASKmskw_ZMMu64_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_shrdv_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="__m512i" varname="b" etype="UI64"/>
+ <parameter type="__m512i" varname="c" etype="UI64"/>
+ <description>Concatenate packed 64-bit integers in "b" and "a" producing an intermediate 128-bit result. Shift the result right by the amount specified in the corresponding element of "c", and store the lower 64-bits in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ dst[i+63:i] := ((b[i+63:i] &lt;&lt; 64)[127:0] | a[i+63:i]) &gt;&gt; (c[i+63:i] &amp; 63)
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSHRDVQ" form="zmm, zmm, zmm" xed="VPSHRDVQ_ZMMu64_MASKmskw_ZMMu64_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_shrdv_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="__m256i" varname="b" etype="UI64"/>
+ <parameter type="__m256i" varname="c" etype="UI64"/>
+ <description>Concatenate packed 64-bit integers in "b" and "a" producing an intermediate 128-bit result. Shift the result right by the amount specified in the corresponding element of "c", and store the lower 64-bits in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := ((b[i+63:i] &lt;&lt; 64)[127:0] | a[i+63:i]) &gt;&gt; (c[i+63:i] &amp; 63)
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSHRDVQ" form="ymm {z}, ymm, ymm" xed="VPSHRDVQ_YMMu64_MASKmskw_YMMu64_YMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_shrdv_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="b" etype="UI64"/>
+ <parameter type="__m256i" varname="c" etype="UI64"/>
+ <description>Concatenate packed 64-bit integers in "b" and "a" producing an intermediate 128-bit result. Shift the result right by the amount specified in the corresponding element of "c", and store the lower 64-bits in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := ((b[i+63:i] &lt;&lt; 64)[127:0] | a[i+63:i]) &gt;&gt; (c[i+63:i] &amp; 63)
+ ELSE
+ dst[i+63:i] := a[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSHRDVQ" form="ymm {k}, ymm, ymm" xed="VPSHRDVQ_YMMu64_MASKmskw_YMMu64_YMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_shrdv_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="__m256i" varname="b" etype="UI64"/>
+ <parameter type="__m256i" varname="c" etype="UI64"/>
+ <description>Concatenate packed 64-bit integers in "b" and "a" producing an intermediate 128-bit result. Shift the result right by the amount specified in the corresponding element of "c", and store the lower 64-bits in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ dst[i+63:i] := ((b[i+63:i] &lt;&lt; 64)[127:0] | a[i+63:i]) &gt;&gt; (c[i+63:i] &amp; 63)
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSHRDVQ" form="ymm, ymm, ymm" xed="VPSHRDVQ_YMMu64_MASKmskw_YMMu64_YMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_shrdv_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <parameter type="__m128i" varname="b" etype="UI64"/>
+ <parameter type="__m128i" varname="c" etype="UI64"/>
+ <description>Concatenate packed 64-bit integers in "b" and "a" producing an intermediate 128-bit result. Shift the result right by the amount specified in the corresponding element of "c", and store the lower 64-bits in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := ((b[i+63:i] &lt;&lt; 64)[127:0] | a[i+63:i]) &gt;&gt; (c[i+63:i] &amp; 63)
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPSHRDVQ" form="xmm {z}, xmm, xmm" xed="VPSHRDVQ_XMMu64_MASKmskw_XMMu64_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_shrdv_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="b" etype="UI64"/>
+ <parameter type="__m128i" varname="c" etype="UI64"/>
+ <description>Concatenate packed 64-bit integers in "b" and "a" producing an intermediate 128-bit result. Shift the result right by the amount specified in the corresponding element of "c", and store the lower 64-bits in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := ((b[i+63:i] &lt;&lt; 64)[127:0] | a[i+63:i]) &gt;&gt; (c[i+63:i] &amp; 63)
+ ELSE
+ dst[i+63:i] := a[i+63:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPSHRDVQ" form="xmm {k}, xmm, xmm" xed="VPSHRDVQ_XMMu64_MASKmskw_XMMu64_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_shrdv_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <parameter type="__m128i" varname="b" etype="UI64"/>
+ <parameter type="__m128i" varname="c" etype="UI64"/>
+ <description>Concatenate packed 64-bit integers in "b" and "a" producing an intermediate 128-bit result. Shift the result right by the amount specified in the corresponding element of "c", and store the lower 64-bits in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ dst[i+63:i] := ((b[i+63:i] &lt;&lt; 64)[127:0] | a[i+63:i]) &gt;&gt; (c[i+63:i] &amp; 63)
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPSHRDVQ" form="xmm, xmm, xmm" xed="VPSHRDVQ_XMMu64_MASKmskw_XMMu64_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_shrdv_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="__m512i" varname="b" etype="UI32"/>
+ <parameter type="__m512i" varname="c" etype="UI32"/>
+ <description>Concatenate packed 32-bit integers in "b" and "a" producing an intermediate 64-bit result. Shift the result right by the amount specified in the corresponding element of "c", and store the lower 32-bits in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := ((b[i+31:i] &lt;&lt; 32)[63:0] | a[i+31:i]) &gt;&gt; (c[i+31:i] &amp; 31)
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSHRDVD" form="zmm {z}, zmm, zmm" xed="VPSHRDVD_ZMMu32_MASKmskw_ZMMu32_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_shrdv_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="b" etype="UI32"/>
+ <parameter type="__m512i" varname="c" etype="UI32"/>
+ <description>Concatenate packed 32-bit integers in "b" and "a" producing an intermediate 64-bit result. Shift the result right by the amount specified in the corresponding element of "c", and store the lower 32-bits in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := ((b[i+31:i] &lt;&lt; 32)[63:0] | a[i+31:i]) &gt;&gt; (c[i+31:i] &amp; 31)
+ ELSE
+ dst[i+31:i] := a[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSHRDVD" form="zmm {k}, zmm, zmm" xed="VPSHRDVD_ZMMu32_MASKmskw_ZMMu32_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_shrdv_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="__m512i" varname="b" etype="UI32"/>
+ <parameter type="__m512i" varname="c" etype="UI32"/>
+ <description>Concatenate packed 32-bit integers in "b" and "a" producing an intermediate 64-bit result. Shift the result right by the amount specified in the corresponding element of "c", and store the lower 32-bits in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := ((b[i+31:i] &lt;&lt; 32)[63:0] | a[i+31:i]) &gt;&gt; (c[i+31:i] &amp; 31)
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSHRDVD" form="zmm, zmm, zmm" xed="VPSHRDVD_ZMMu32_MASKmskw_ZMMu32_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_shrdv_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="__m256i" varname="b" etype="UI32"/>
+ <parameter type="__m256i" varname="c" etype="UI32"/>
+ <description>Concatenate packed 32-bit integers in "b" and "a" producing an intermediate 64-bit result. Shift the result right by the amount specified in the corresponding element of "c", and store the lower 32-bits in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := ((b[i+31:i] &lt;&lt; 32)[63:0] | a[i+31:i]) &gt;&gt; (c[i+31:i] &amp; 31)
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSHRDVD" form="ymm {z}, ymm, ymm" xed="VPSHRDVD_YMMu32_MASKmskw_YMMu32_YMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_shrdv_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="b" etype="UI32"/>
+ <parameter type="__m256i" varname="c" etype="UI32"/>
+ <description>Concatenate packed 32-bit integers in "b" and "a" producing an intermediate 64-bit result. Shift the result right by the amount specified in the corresponding element of "c", and store the lower 32-bits in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := ((b[i+31:i] &lt;&lt; 32)[63:0] | a[i+31:i]) &gt;&gt; (c[i+31:i] &amp; 31)
+ ELSE
+ dst[i+31:i] := a[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSHRDVD" form="ymm {k}, ymm, ymm" xed="VPSHRDVD_YMMu32_MASKmskw_YMMu32_YMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_shrdv_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="__m256i" varname="b" etype="UI32"/>
+ <parameter type="__m256i" varname="c" etype="UI32"/>
+ <description>Concatenate packed 32-bit integers in "b" and "a" producing an intermediate 64-bit result. Shift the result right by the amount specified in the corresponding element of "c", and store the lower 32-bits in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ dst[i+31:i] := ((b[i+31:i] &lt;&lt; 32)[63:0] | a[i+31:i]) &gt;&gt; (c[i+31:i] &amp; 31)
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSHRDVD" form="ymm, ymm, ymm" xed="VPSHRDVD_YMMu32_MASKmskw_YMMu32_YMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_shrdv_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="b" etype="UI32"/>
+ <parameter type="__m128i" varname="c" etype="UI32"/>
+ <description>Concatenate packed 32-bit integers in "b" and "a" producing an intermediate 64-bit result. Shift the result right by the amount specified in the corresponding element of "c", and store the lower 32-bits in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := ((b[i+31:i] &lt;&lt; 32)[63:0] | a[i+31:i]) &gt;&gt; (c[i+31:i] &amp; 31)
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPSHRDVD" form="xmm {z}, xmm, xmm" xed="VPSHRDVD_XMMu32_MASKmskw_XMMu32_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_shrdv_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="b" etype="UI32"/>
+ <parameter type="__m128i" varname="c" etype="UI32"/>
+ <description>Concatenate packed 32-bit integers in "b" and "a" producing an intermediate 64-bit result. Shift the result right by the amount specified in the corresponding element of "c", and store the lower 32-bits in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := ((b[i+31:i] &lt;&lt; 32)[63:0] | a[i+31:i]) &gt;&gt; (c[i+31:i] &amp; 31)
+ ELSE
+ dst[i+31:i] := a[i+31:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPSHRDVD" form="xmm {k}, xmm, xmm" xed="VPSHRDVD_XMMu32_MASKmskw_XMMu32_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_shrdv_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="b" etype="UI32"/>
+ <parameter type="__m128i" varname="c" etype="UI32"/>
+ <description>Concatenate packed 32-bit integers in "b" and "a" producing an intermediate 64-bit result. Shift the result right by the amount specified in the corresponding element of "c", and store the lower 32-bits in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ dst[i+31:i] := ((b[i+31:i] &lt;&lt; 32)[63:0] | a[i+31:i]) &gt;&gt; (c[i+31:i] &amp; 31)
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPSHRDVD" form="xmm, xmm, xmm" xed="VPSHRDVD_XMMu32_MASKmskw_XMMu32_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_shrdv_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI16"/>
+ <parameter type="__m512i" varname="b" etype="UI16"/>
+ <parameter type="__m512i" varname="c" etype="UI16"/>
+ <description>Concatenate packed 16-bit integers in "b" and "a" producing an intermediate 32-bit result. Shift the result right by the amount specified in the corresponding element of "c", and store the lower 16-bits in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := ((b[i+15:i] &lt;&lt; 16)[31:0] | a[i+15:i]) &gt;&gt; (c[i+15:i] &amp; 15)
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSHRDVW" form="zmm {z}, zmm, zmm" xed="VPSHRDVW_ZMMu16_MASKmskw_ZMMu16_ZMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_shrdv_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__m512i" varname="a" etype="UI16"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="b" etype="UI16"/>
+ <parameter type="__m512i" varname="c" etype="UI16"/>
+ <description>Concatenate packed 16-bit integers in "b" and "a" producing an intermediate 32-bit result. Shift the result right by the amount specified in the corresponding element of "c", and store the lower 16-bits in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := ((b[i+15:i] &lt;&lt; 16)[31:0] | a[i+15:i]) &gt;&gt; (c[i+15:i] &amp; 15)
+ ELSE
+ dst[i+15:i] := a[i+15:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSHRDVW" form="zmm {k}, zmm, zmm" xed="VPSHRDVW_ZMMu16_MASKmskw_ZMMu16_ZMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_shrdv_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__m512i" varname="a" etype="UI16"/>
+ <parameter type="__m512i" varname="b" etype="UI16"/>
+ <parameter type="__m512i" varname="c" etype="UI16"/>
+ <description>Concatenate packed 16-bit integers in "b" and "a" producing an intermediate 32-bit result. Shift the result right by the amount specified in the corresponding element of "c", and store the lower 16-bits in "dst".</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ dst[i+15:i] := ((b[i+15:i] &lt;&lt; 16)[31:0] | a[i+15:i]) &gt;&gt; (c[i+15:i] &amp; 15)
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSHRDVW" form="zmm, zmm, zmm" xed="VPSHRDVW_ZMMu16_MASKmskw_ZMMu16_ZMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_shrdv_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI16"/>
+ <parameter type="__m256i" varname="b" etype="UI16"/>
+ <parameter type="__m256i" varname="c" etype="UI16"/>
+ <description>Concatenate packed 16-bit integers in "b" and "a" producing an intermediate 32-bit result. Shift the result right by the amount specified in the corresponding element of "c", and store the lower 16-bits in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := ((b[i+15:i] &lt;&lt; 16)[31:0] | a[i+15:i]) &gt;&gt; (c[i+15:i] &amp; 15)
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSHRDVW" form="ymm {z}, ymm, ymm" xed="VPSHRDVW_YMMu16_MASKmskw_YMMu16_YMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_shrdv_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__m256i" varname="a" etype="UI16"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="b" etype="UI16"/>
+ <parameter type="__m256i" varname="c" etype="UI16"/>
+ <description>Concatenate packed 16-bit integers in "b" and "a" producing an intermediate 32-bit result. Shift the result right by the amount specified in the corresponding element of "c", and store the lower 16-bits in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := ((b[i+15:i] &lt;&lt; 16)[31:0] | a[i+15:i]) &gt;&gt; (c[i+15:i] &amp; 15)
+ ELSE
+ dst[i+15:i] := a[i+15:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSHRDVW" form="ymm {k}, ymm, ymm" xed="VPSHRDVW_YMMu16_MASKmskw_YMMu16_YMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_shrdv_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__m256i" varname="a" etype="UI16"/>
+ <parameter type="__m256i" varname="b" etype="UI16"/>
+ <parameter type="__m256i" varname="c" etype="UI16"/>
+ <description>Concatenate packed 16-bit integers in "b" and "a" producing an intermediate 32-bit result. Shift the result right by the amount specified in the corresponding element of "c", and store the lower 16-bits in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ dst[i+15:i] := ((b[i+15:i] &lt;&lt; 16)[31:0] | a[i+15:i]) &gt;&gt; (c[i+15:i] &amp; 15)
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSHRDVW" form="ymm, ymm, ymm" xed="VPSHRDVW_YMMu16_MASKmskw_YMMu16_YMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_shrdv_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <parameter type="__m128i" varname="b" etype="UI16"/>
+ <parameter type="__m128i" varname="c" etype="UI16"/>
+ <description>Concatenate packed 16-bit integers in "b" and "a" producing an intermediate 32-bit result. Shift the result right by the amount specified in the corresponding element of "c", and store the lower 16-bits in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := ((b[i+15:i] &lt;&lt; 16)[31:0] | a[i+15:i]) &gt;&gt; (c[i+15:i] &amp; 15)
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPSHRDVW" form="xmm {z}, xmm, xmm" xed="VPSHRDVW_XMMu16_MASKmskw_XMMu16_XMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_shrdv_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="b" etype="UI16"/>
+ <parameter type="__m128i" varname="c" etype="UI16"/>
+ <description>Concatenate packed 16-bit integers in "b" and "a" producing an intermediate 32-bit result. Shift the result right by the amount specified in the corresponding element of "c", and store the lower 16-bits in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := ((b[i+15:i] &lt;&lt; 16)[31:0] | a[i+15:i]) &gt;&gt; (c[i+15:i] &amp; 15)
+ ELSE
+ dst[i+15:i] := a[i+15:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPSHRDVW" form="xmm {k}, xmm, xmm" xed="VPSHRDVW_XMMu16_MASKmskw_XMMu16_XMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_shrdv_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <parameter type="__m128i" varname="b" etype="UI16"/>
+ <parameter type="__m128i" varname="c" etype="UI16"/>
+ <description>Concatenate packed 16-bit integers in "b" and "a" producing an intermediate 32-bit result. Shift the result right by the amount specified in the corresponding element of "c", and store the lower 16-bits in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ dst[i+15:i] := ((b[i+15:i] &lt;&lt; 16)[31:0] | a[i+15:i]) &gt;&gt; (c[i+15:i] &amp; 15)
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPSHRDVW" form="xmm, xmm, xmm" xed="VPSHRDVW_XMMu16_MASKmskw_XMMu16_XMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_shrdi_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="__m512i" varname="b" etype="UI64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Concatenate packed 64-bit integers in "b" and "a" producing an intermediate 128-bit result. Shift the result right by "imm8" bits, and store the lower 64-bits in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := ((b[i+63:i] &lt;&lt; 64)[127:0] | a[i+63:i]) &gt;&gt; imm8[5:0]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSHRDQ" form="zmm {z}, zmm, zmm, imm8" xed="VPSHRDQ_ZMMu64_MASKmskw_ZMMu64_ZMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_shrdi_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="__m512i" varname="b" etype="UI64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Concatenate packed 64-bit integers in "b" and "a" producing an intermediate 128-bit result. Shift the result right by "imm8" bits, and store the lower 64-bits in "dst" using writemask "k" (elements are copied from "src"" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := ((b[i+63:i] &lt;&lt; 64)[127:0] | a[i+63:i]) &gt;&gt; imm8[5:0]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSHRDQ" form="zmm {k}, zmm, zmm, imm8" xed="VPSHRDQ_ZMMu64_MASKmskw_ZMMu64_ZMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_shrdi_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="__m512i" varname="b" etype="UI64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Concatenate packed 64-bit integers in "b" and "a" producing an intermediate 128-bit result. Shift the result right by "imm8" bits, and store the lower 64-bits in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ dst[i+63:i] := ((b[i+63:i] &lt;&lt; 64)[127:0] | a[i+63:i]) &gt;&gt; imm8[5:0]
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSHRDQ" form="zmm, zmm, zmm, imm8" xed="VPSHRDQ_ZMMu64_MASKmskw_ZMMu64_ZMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_shrdi_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="__m256i" varname="b" etype="UI64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Concatenate packed 64-bit integers in "b" and "a" producing an intermediate 128-bit result. Shift the result right by "imm8" bits, and store the lower 64-bits in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := ((b[i+63:i] &lt;&lt; 64)[127:0] | a[i+63:i]) &gt;&gt; imm8[5:0]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSHRDQ" form="ymm {z}, ymm, ymm, imm8" xed="VPSHRDQ_YMMu64_MASKmskw_YMMu64_YMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_shrdi_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m256i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="__m256i" varname="b" etype="UI64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Concatenate packed 64-bit integers in "b" and "a" producing an intermediate 128-bit result. Shift the result right by "imm8" bits, and store the lower 64-bits in "dst" using writemask "k" (elements are copied from "src"" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := ((b[i+63:i] &lt;&lt; 64)[127:0] | a[i+63:i]) &gt;&gt; imm8[5:0]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSHRDQ" form="ymm {k}, ymm, ymm, imm8" xed="VPSHRDQ_YMMu64_MASKmskw_YMMu64_YMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_shrdi_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="__m256i" varname="b" etype="UI64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Concatenate packed 64-bit integers in "b" and "a" producing an intermediate 128-bit result. Shift the result right by "imm8" bits, and store the lower 64-bits in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ dst[i+63:i] := ((b[i+63:i] &lt;&lt; 64)[127:0] | a[i+63:i]) &gt;&gt; imm8[5:0]
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSHRDQ" form="ymm, ymm, ymm, imm8" xed="VPSHRDQ_YMMu64_MASKmskw_YMMu64_YMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_shrdi_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <parameter type="__m128i" varname="b" etype="UI64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Concatenate packed 64-bit integers in "b" and "a" producing an intermediate 128-bit result. Shift the result right by "imm8" bits, and store the lower 64-bits in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := ((b[i+63:i] &lt;&lt; 64)[127:0] | a[i+63:i]) &gt;&gt; imm8[5:0]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPSHRDQ" form="xmm {z}, xmm, xmm, imm8" xed="VPSHRDQ_XMMu64_MASKmskw_XMMu64_XMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_shrdi_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m128i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <parameter type="__m128i" varname="b" etype="UI64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Concatenate packed 64-bit integers in "b" and "a" producing an intermediate 128-bit result. Shift the result right by "imm8" bits, and store the lower 64-bits in "dst" using writemask "k" (elements are copied from "src"" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := ((b[i+63:i] &lt;&lt; 64)[127:0] | a[i+63:i]) &gt;&gt; imm8[5:0]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPSHRDQ" form="xmm {k}, xmm, xmm, imm8" xed="VPSHRDQ_XMMu64_MASKmskw_XMMu64_XMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_shrdi_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <parameter type="__m128i" varname="b" etype="UI64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Concatenate packed 64-bit integers in "b" and "a" producing an intermediate 128-bit result. Shift the result right by "imm8" bits, and store the lower 64-bits in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ dst[i+63:i] := ((b[i+63:i] &lt;&lt; 64)[127:0] | a[i+63:i]) &gt;&gt; imm8[5:0]
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPSHRDQ" form="xmm, xmm, xmm, imm8" xed="VPSHRDQ_XMMu64_MASKmskw_XMMu64_XMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_shrdi_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="__m512i" varname="b" etype="UI32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Concatenate packed 32-bit integers in "b" and "a" producing an intermediate 64-bit result. Shift the result right by "imm8" bits, and store the lower 32-bits in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := ((b[i+31:i] &lt;&lt; 32)[63:0] | a[i+31:i]) &gt;&gt; imm8[4:0]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSHRDD" form="zmm {z}, zmm, zmm, imm8" xed="VPSHRDD_ZMMu32_MASKmskw_ZMMu32_ZMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_shrdi_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="src" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="__m512i" varname="b" etype="UI32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Concatenate packed 32-bit integers in "b" and "a" producing an intermediate 64-bit result. Shift the result right by "imm8" bits, and store the lower 32-bits in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := ((b[i+31:i] &lt;&lt; 32)[63:0] | a[i+31:i]) &gt;&gt; imm8[4:0]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSHRDD" form="zmm {k}, zmm, zmm, imm8" xed="VPSHRDD_ZMMu32_MASKmskw_ZMMu32_ZMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_shrdi_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="__m512i" varname="b" etype="UI32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Concatenate packed 32-bit integers in "b" and "a" producing an intermediate 64-bit result. Shift the result right by "imm8" bits, and store the lower 32-bits in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := ((b[i+31:i] &lt;&lt; 32)[63:0] | a[i+31:i]) &gt;&gt; imm8[4:0]
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSHRDD" form="zmm, zmm, zmm, imm8" xed="VPSHRDD_ZMMu32_MASKmskw_ZMMu32_ZMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_shrdi_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="__m256i" varname="b" etype="UI32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Concatenate packed 32-bit integers in "b" and "a" producing an intermediate 64-bit result. Shift the result right by "imm8" bits, and store the lower 32-bits in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := ((b[i+31:i] &lt;&lt; 32)[63:0] | a[i+31:i]) &gt;&gt; imm8[4:0]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSHRDD" form="ymm {z}, ymm, ymm, imm8" xed="VPSHRDD_YMMu32_MASKmskw_YMMu32_YMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_shrdi_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="__m256i" varname="b" etype="UI32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Concatenate packed 32-bit integers in "b" and "a" producing an intermediate 64-bit result. Shift the result right by "imm8" bits, and store the lower 32-bits in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := ((b[i+31:i] &lt;&lt; 32)[63:0] | a[i+31:i]) &gt;&gt; imm8[4:0]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSHRDD" form="ymm {k}, ymm, ymm, imm8" xed="VPSHRDD_YMMu32_MASKmskw_YMMu32_YMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_shrdi_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="__m256i" varname="b" etype="UI32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Concatenate packed 32-bit integers in "b" and "a" producing an intermediate 64-bit result. Shift the result right by "imm8" bits, and store the lower 32-bits in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ dst[i+31:i] := ((b[i+31:i] &lt;&lt; 32)[63:0] | a[i+31:i]) &gt;&gt; imm8[4:0]
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSHRDD" form="ymm, ymm, ymm, imm8" xed="VPSHRDD_YMMu32_MASKmskw_YMMu32_YMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_shrdi_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="b" etype="UI32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Concatenate packed 32-bit integers in "b" and "a" producing an intermediate 64-bit result. Shift the result right by "imm8" bits, and store the lower 32-bits in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := ((b[i+31:i] &lt;&lt; 32)[63:0] | a[i+31:i]) &gt;&gt; imm8[4:0]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPSHRDD" form="xmm {z}, xmm, xmm, imm8" xed="VPSHRDD_XMMu32_MASKmskw_XMMu32_XMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_shrdi_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="b" etype="UI32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Concatenate packed 32-bit integers in "b" and "a" producing an intermediate 64-bit result. Shift the result right by "imm8" bits, and store the lower 32-bits in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := ((b[i+31:i] &lt;&lt; 32)[63:0] | a[i+31:i]) &gt;&gt; imm8[4:0]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPSHRDD" form="xmm {k}, xmm, xmm, imm8" xed="VPSHRDD_XMMu32_MASKmskw_XMMu32_XMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_shrdi_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="b" etype="UI32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Concatenate packed 32-bit integers in "b" and "a" producing an intermediate 64-bit result. Shift the result right by "imm8" bits, and store the lower 32-bits in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ dst[i+31:i] := ((b[i+31:i] &lt;&lt; 32)[63:0] | a[i+31:i]) &gt;&gt; imm8[4:0]
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPSHRDD" form="xmm, xmm, xmm, imm8" xed="VPSHRDD_XMMu32_MASKmskw_XMMu32_XMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_shrdi_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI16"/>
+ <parameter type="__m512i" varname="b" etype="UI16"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Concatenate packed 16-bit integers in "b" and "a" producing an intermediate 32-bit result. Shift the result right by "imm8" bits, and store the lower 16-bits in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := ((b[i+15:i] &lt;&lt; 16)[31:0] | a[i+15:i]) &gt;&gt; imm8[3:0]
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSHRDW" form="zmm {z}, zmm, zmm, imm8" xed="VPSHRDW_ZMMu16_MASKmskw_ZMMu16_ZMMu16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_shrdi_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__m512i" varname="src" etype="UI16"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI16"/>
+ <parameter type="__m512i" varname="b" etype="UI16"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Concatenate packed 16-bit integers in "b" and "a" producing an intermediate 32-bit result. Shift the result right by "imm8" bits, and store the lower 16-bits in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := ((b[i+15:i] &lt;&lt; 16)[31:0] | a[i+15:i]) &gt;&gt; imm8[3:0]
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSHRDW" form="zmm {k}, zmm, zmm, imm8" xed="VPSHRDW_ZMMu16_MASKmskw_ZMMu16_ZMMu16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_shrdi_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__m512i" varname="a" etype="UI16"/>
+ <parameter type="__m512i" varname="b" etype="UI16"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Concatenate packed 16-bit integers in "b" and "a" producing an intermediate 32-bit result. Shift the result right by "imm8" bits, and store the lower 16-bits in "dst".</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ dst[i+15:i] := ((b[i+15:i] &lt;&lt; 16)[31:0] | a[i+15:i]) &gt;&gt; imm8[3:0]
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSHRDW" form="zmm, zmm, zmm, imm8" xed="VPSHRDW_ZMMu16_MASKmskw_ZMMu16_ZMMu16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_shrdi_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI16"/>
+ <parameter type="__m256i" varname="b" etype="UI16"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Concatenate packed 16-bit integers in "b" and "a" producing an intermediate 32-bit result. Shift the result right by "imm8" bits, and store the lower 16-bits in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := ((b[i+15:i] &lt;&lt; 16)[31:0] | a[i+15:i]) &gt;&gt; imm8[3:0]
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSHRDW" form="ymm {z}, ymm, ymm, imm8" xed="VPSHRDW_YMMu16_MASKmskw_YMMu16_YMMu16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_shrdi_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__m256i" varname="src" etype="UI16"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI16"/>
+ <parameter type="__m256i" varname="b" etype="UI16"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Concatenate packed 16-bit integers in "b" and "a" producing an intermediate 32-bit result. Shift the result right by "imm8" bits, and store the lower 16-bits in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := ((b[i+15:i] &lt;&lt; 16)[31:0] | a[i+15:i]) &gt;&gt; imm8[3:0]
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSHRDW" form="ymm {k}, ymm, ymm, imm8" xed="VPSHRDW_YMMu16_MASKmskw_YMMu16_YMMu16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_shrdi_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__m256i" varname="a" etype="UI16"/>
+ <parameter type="__m256i" varname="b" etype="UI16"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Concatenate packed 16-bit integers in "b" and "a" producing an intermediate 32-bit result. Shift the result right by "imm8" bits, and store the lower 16-bits in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ dst[i+15:i] := ((b[i+15:i] &lt;&lt; 16)[31:0] | a[i+15:i]) &gt;&gt; imm8[3:0]
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSHRDW" form="ymm, ymm, ymm, imm8" xed="VPSHRDW_YMMu16_MASKmskw_YMMu16_YMMu16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_shrdi_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <parameter type="__m128i" varname="b" etype="UI16"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Concatenate packed 16-bit integers in "b" and "a" producing an intermediate 32-bit result. Shift the result right by "imm8" bits, and store the lower 16-bits in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := ((b[i+15:i] &lt;&lt; 16)[31:0] | a[i+15:i]) &gt;&gt; imm8[3:0]
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPSHRDW" form="xmm {z}, xmm, xmm, imm8" xed="VPSHRDW_XMMu16_MASKmskw_XMMu16_XMMu16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_shrdi_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__m128i" varname="src" etype="UI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <parameter type="__m128i" varname="b" etype="UI16"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Concatenate packed 16-bit integers in "b" and "a" producing an intermediate 32-bit result. Shift the result right by "imm8" bits, and store the lower 16-bits in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := ((b[i+15:i] &lt;&lt; 16)[31:0] | a[i+15:i]) &gt;&gt; imm8[3:0]
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPSHRDW" form="xmm {k}, xmm, xmm, imm8" xed="VPSHRDW_XMMu16_MASKmskw_XMMu16_XMMu16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_shrdi_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <parameter type="__m128i" varname="b" etype="UI16"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Concatenate packed 16-bit integers in "b" and "a" producing an intermediate 32-bit result. Shift the result right by "imm8" bits, and store the lower 16-bits in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ dst[i+15:i] := ((b[i+15:i] &lt;&lt; 16)[31:0] | a[i+15:i]) &gt;&gt; imm8[3:0]
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPSHRDW" form="xmm, xmm, xmm, imm8" xed="VPSHRDW_XMMu16_MASKmskw_XMMu16_XMMu16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_shldv_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="__m512i" varname="b" etype="UI64"/>
+ <parameter type="__m512i" varname="c" etype="UI64"/>
+ <description>Concatenate packed 64-bit integers in "a" and "b" producing an intermediate 128-bit result. Shift the result left by the amount specified in the corresponding element of "c", and store the upper 64-bits in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ tmp[127:0] := ((a[i+63:i] &lt;&lt; 64)[127:0] | b[i+63:i]) &lt;&lt; (c[i+63:i] &amp; 63)
+ dst[i+63:i] := tmp[127:64]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSHLDVQ" form="zmm {z}, zmm, zmm" xed="VPSHLDVQ_ZMMu64_MASKmskw_ZMMu64_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_shldv_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="b" etype="UI64"/>
+ <parameter type="__m512i" varname="c" etype="UI64"/>
+ <description>Concatenate packed 64-bit integers in "a" and "b" producing an intermediate 128-bit result. Shift the result left by the amount specified in the corresponding element of "c", and store the upper 64-bits in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ tmp[127:0] := ((a[i+63:i] &lt;&lt; 64)[127:0] | b[i+63:i]) &lt;&lt; (c[i+63:i] &amp; 63)
+ dst[i+63:i] := tmp[127:64]
+ ELSE
+ dst[i+63:i] := a[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSHLDVQ" form="zmm {k}, zmm, zmm" xed="VPSHLDVQ_ZMMu64_MASKmskw_ZMMu64_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_shldv_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="__m512i" varname="b" etype="UI64"/>
+ <parameter type="__m512i" varname="c" etype="UI64"/>
+ <description>Concatenate packed 64-bit integers in "a" and "b" producing an intermediate 128-bit result. Shift the result left by the amount specified in the corresponding element of "c", and store the upper 64-bits in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ tmp[127:0] := ((a[i+63:i] &lt;&lt; 64)[127:0] | b[i+63:i]) &lt;&lt; (c[i+63:i] &amp; 63)
+ dst[i+63:i] := tmp[127:64]
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSHLDVQ" form="zmm, zmm, zmm" xed="VPSHLDVQ_ZMMu64_MASKmskw_ZMMu64_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_shldv_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="__m256i" varname="b" etype="UI64"/>
+ <parameter type="__m256i" varname="c" etype="UI64"/>
+ <description>Concatenate packed 64-bit integers in "a" and "b" producing an intermediate 128-bit result. Shift the result left by the amount specified in the corresponding element of "c", and store the upper 64-bits in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ tmp[127:0] := ((a[i+63:i] &lt;&lt; 64)[127:0] | b[i+63:i]) &lt;&lt; (c[i+63:i] &amp; 63)
+ dst[i+63:i] := tmp[127:64]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSHLDVQ" form="ymm {z}, ymm, ymm" xed="VPSHLDVQ_YMMu64_MASKmskw_YMMu64_YMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_shldv_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="b" etype="UI64"/>
+ <parameter type="__m256i" varname="c" etype="UI64"/>
+ <description>Concatenate packed 64-bit integers in "a" and "b" producing an intermediate 128-bit result. Shift the result left by the amount specified in the corresponding element of "c", and store the upper 64-bits in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ tmp[127:0] := ((a[i+63:i] &lt;&lt; 64)[127:0] | b[i+63:i]) &lt;&lt; (c[i+63:i] &amp; 63)
+ dst[i+63:i] := tmp[127:64]
+ ELSE
+ dst[i+63:i] := a[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSHLDVQ" form="ymm {k}, ymm, ymm" xed="VPSHLDVQ_YMMu64_MASKmskw_YMMu64_YMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_shldv_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="__m256i" varname="b" etype="UI64"/>
+ <parameter type="__m256i" varname="c" etype="UI64"/>
+ <description>Concatenate packed 64-bit integers in "a" and "b" producing an intermediate 128-bit result. Shift the result left by the amount specified in the corresponding element of "c", and store the upper 64-bits in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ tmp[127:0] := ((a[i+63:i] &lt;&lt; 64)[127:0] | b[i+63:i]) &lt;&lt; (c[i+63:i] &amp; 63)
+ dst[i+63:i] := tmp[127:64]
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSHLDVQ" form="ymm, ymm, ymm" xed="VPSHLDVQ_YMMu64_MASKmskw_YMMu64_YMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_shldv_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <parameter type="__m128i" varname="b" etype="UI64"/>
+ <parameter type="__m128i" varname="c" etype="UI64"/>
+ <description>Concatenate packed 64-bit integers in "a" and "b" producing an intermediate 128-bit result. Shift the result left by the amount specified in the corresponding element of "c", and store the upper 64-bits in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ tmp[127:0] := ((a[i+63:i] &lt;&lt; 64)[127:0] | b[i+63:i]) &lt;&lt; (c[i+63:i] &amp; 63)
+ dst[i+63:i] := tmp[127:64]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPSHLDVQ" form="xmm {z}, xmm, xmm" xed="VPSHLDVQ_XMMu64_MASKmskw_XMMu64_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_shldv_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="b" etype="UI64"/>
+ <parameter type="__m128i" varname="c" etype="UI64"/>
+ <description>Concatenate packed 64-bit integers in "a" and "b" producing an intermediate 128-bit result. Shift the result left by the amount specified in the corresponding element of "c", and store the upper 64-bits in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ tmp[127:0] := ((a[i+63:i] &lt;&lt; 64)[127:0] | b[i+63:i]) &lt;&lt; (c[i+63:i] &amp; 63)
+ dst[i+63:i] := tmp[127:64]
+ ELSE
+ dst[i+63:i] := a[i+63:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPSHLDVQ" form="xmm {k}, xmm, xmm" xed="VPSHLDVQ_XMMu64_MASKmskw_XMMu64_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_shldv_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <parameter type="__m128i" varname="b" etype="UI64"/>
+ <parameter type="__m128i" varname="c" etype="UI64"/>
+ <description>Concatenate packed 64-bit integers in "a" and "b" producing an intermediate 128-bit result. Shift the result left by the amount specified in the corresponding element of "c", and store the upper 64-bits in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ tmp[127:0] := ((a[i+63:i] &lt;&lt; 64)[127:0] | b[i+63:i]) &lt;&lt; (c[i+63:i] &amp; 63)
+ dst[i+63:i] := tmp[127:64]
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPSHLDVQ" form="xmm, xmm, xmm" xed="VPSHLDVQ_XMMu64_MASKmskw_XMMu64_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_shldv_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="__m512i" varname="b" etype="UI32"/>
+ <parameter type="__m512i" varname="c" etype="UI32"/>
+ <description>Concatenate packed 32-bit integers in "a" and "b" producing an intermediate 64-bit result. Shift the result left by the amount specified in the corresponding element of "c", and store the upper 32-bits in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ tmp[63:0] := ((a[i+31:i] &lt;&lt; 32)[63:0] | b[i+31:i]) &lt;&lt; (c[i+31:i] &amp; 31)
+ dst[i+31:i] := tmp[63:32]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSHLDVD" form="zmm {z}, zmm, zmm" xed="VPSHLDVD_ZMMu32_MASKmskw_ZMMu32_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_shldv_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="b" etype="UI32"/>
+ <parameter type="__m512i" varname="c" etype="UI32"/>
+ <description>Concatenate packed 32-bit integers in "a" and "b" producing an intermediate 64-bit result. Shift the result left by the amount specified in the corresponding element of "c", and store the upper 32-bits in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ tmp[63:0] := ((a[i+31:i] &lt;&lt; 32)[63:0] | b[i+31:i]) &lt;&lt; (c[i+31:i] &amp; 31)
+ dst[i+31:i] := tmp[63:32]
+ ELSE
+ dst[i+31:i] := a[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSHLDVD" form="zmm {k}, zmm, zmm" xed="VPSHLDVD_ZMMu32_MASKmskw_ZMMu32_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_shldv_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="__m512i" varname="b" etype="UI32"/>
+ <parameter type="__m512i" varname="c" etype="UI32"/>
+ <description>Concatenate packed 32-bit integers in "a" and "b" producing an intermediate 64-bit result. Shift the result left by the amount specified in the corresponding element of "c", and store the upper 32-bits in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ tmp[63:0] := ((a[i+31:i] &lt;&lt; 32)[63:0] | b[i+31:i]) &lt;&lt; (c[i+31:i] &amp; 31)
+ dst[i+31:i] := tmp[63:32]
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSHLDVD" form="zmm, zmm, zmm" xed="VPSHLDVD_ZMMu32_MASKmskw_ZMMu32_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_shldv_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="__m256i" varname="b" etype="UI32"/>
+ <parameter type="__m256i" varname="c" etype="UI32"/>
+ <description>Concatenate packed 32-bit integers in "a" and "b" producing an intermediate 64-bit result. Shift the result left by the amount specified in the corresponding element of "c", and store the upper 32-bits in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ tmp[63:0] := ((a[i+31:i] &lt;&lt; 32)[63:0] | b[i+31:i]) &lt;&lt; (c[i+31:i] &amp; 31)
+ dst[i+31:i] := tmp[63:32]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSHLDVD" form="ymm {z}, ymm, ymm" xed="VPSHLDVD_YMMu32_MASKmskw_YMMu32_YMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_shldv_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="b" etype="UI32"/>
+ <parameter type="__m256i" varname="c" etype="UI32"/>
+ <description>Concatenate packed 32-bit integers in "a" and "b" producing an intermediate 64-bit result. Shift the result left by the amount specified in the corresponding element of "c", and store the upper 32-bits in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ tmp[63:0] := ((a[i+31:i] &lt;&lt; 32)[63:0] | b[i+31:i]) &lt;&lt; (c[i+31:i] &amp; 31)
+ dst[i+31:i] := tmp[63:32]
+ ELSE
+ dst[i+31:i] := a[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSHLDVD" form="ymm {k}, ymm, ymm" xed="VPSHLDVD_YMMu32_MASKmskw_YMMu32_YMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_shldv_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="__m256i" varname="b" etype="UI32"/>
+ <parameter type="__m256i" varname="c" etype="UI32"/>
+ <description>Concatenate packed 32-bit integers in "a" and "b" producing an intermediate 64-bit result. Shift the result left by the amount specified in the corresponding element of "c", and store the upper 32-bits in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ tmp[63:0] := ((a[i+31:i] &lt;&lt; 32)[63:0] | b[i+31:i]) &lt;&lt; (c[i+31:i] &amp; 31)
+ dst[i+31:i] := tmp[63:32]
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSHLDVD" form="ymm, ymm, ymm" xed="VPSHLDVD_YMMu32_MASKmskw_YMMu32_YMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_shldv_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="b" etype="UI32"/>
+ <parameter type="__m128i" varname="c" etype="UI32"/>
+ <description>Concatenate packed 32-bit integers in "a" and "b" producing an intermediate 64-bit result. Shift the result left by the amount specified in the corresponding element of "c", and store the upper 32-bits in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ tmp[63:0] := ((a[i+31:i] &lt;&lt; 32)[63:0] | b[i+31:i]) &lt;&lt; (c[i+31:i] &amp; 31)
+ dst[i+31:i] := tmp[63:32]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPSHLDVD" form="xmm {z}, xmm, xmm" xed="VPSHLDVD_XMMu32_MASKmskw_XMMu32_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_shldv_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="b" etype="UI32"/>
+ <parameter type="__m128i" varname="c" etype="UI32"/>
+ <description>Concatenate packed 32-bit integers in "a" and "b" producing an intermediate 64-bit result. Shift the result left by the amount specified in the corresponding element of "c", and store the upper 32-bits in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ tmp[63:0] := ((a[i+31:i] &lt;&lt; 32)[63:0] | b[i+31:i]) &lt;&lt; (c[i+31:i] &amp; 31)
+ dst[i+31:i] := tmp[63:32]
+ ELSE
+ dst[i+31:i] := a[i+31:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPSHLDVD" form="xmm {k}, xmm, xmm" xed="VPSHLDVD_XMMu32_MASKmskw_XMMu32_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_shldv_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="b" etype="UI32"/>
+ <parameter type="__m128i" varname="c" etype="UI32"/>
+ <description>Concatenate packed 32-bit integers in "a" and "b" producing an intermediate 64-bit result. Shift the result left by the amount specified in the corresponding element of "c", and store the upper 32-bits in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ tmp[63:0] := ((a[i+31:i] &lt;&lt; 32)[63:0] | b[i+31:i]) &lt;&lt; (c[i+31:i] &amp; 31)
+ dst[i+31:i] := tmp[63:32]
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPSHLDVD" form="xmm, xmm, xmm" xed="VPSHLDVD_XMMu32_MASKmskw_XMMu32_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_shldv_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI16"/>
+ <parameter type="__m512i" varname="b" etype="UI16"/>
+ <parameter type="__m512i" varname="c" etype="UI16"/>
+ <description>Concatenate packed 16-bit integers in "a" and "b" producing an intermediate 32-bit result. Shift the result left by the amount specified in the corresponding element of "c", and store the upper 16-bits in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ IF k[j]
+ tmp[31:0] := ((a[i+15:i] &lt;&lt; 16)[31:0] | b[i+15:i]) &lt;&lt; (c[i+15:i] &amp; 15)
+ dst[i+15:i] := tmp[31:16]
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSHLDVW" form="zmm {z}, zmm, zmm" xed="VPSHLDVW_ZMMu16_MASKmskw_ZMMu16_ZMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_shldv_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__m512i" varname="a" etype="UI16"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="b" etype="UI16"/>
+ <parameter type="__m512i" varname="c" etype="UI16"/>
+ <description>Concatenate packed 16-bit integers in "a" and "b" producing an intermediate 32-bit result. Shift the result left by the amount specified in the corresponding element of "c", and store the upper 16-bits in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ IF k[j]
+ tmp[31:0] := ((a[i+15:i] &lt;&lt; 16)[31:0] | b[i+15:i]) &lt;&lt; (c[i+15:i] &amp; 15)
+ dst[i+15:i] := tmp[31:16]
+ ELSE
+ dst[i+15:i] := a[i+15:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSHLDVW" form="zmm {k}, zmm, zmm" xed="VPSHLDVW_ZMMu16_MASKmskw_ZMMu16_ZMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_shldv_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__m512i" varname="a" etype="UI16"/>
+ <parameter type="__m512i" varname="b" etype="UI16"/>
+ <parameter type="__m512i" varname="c" etype="UI16"/>
+ <description>Concatenate packed 16-bit integers in "a" and "b" producing an intermediate 32-bit result. Shift the result left by the amount specified in the corresponding element of "c", and store the upper 16-bits in "dst".</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ tmp[31:0] := ((a[i+15:i] &lt;&lt; 16)[31:0] | b[i+15:i]) &lt;&lt; (c[i+15:i] &amp; 15)
+ dst[i+15:i] := tmp[31:16]
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSHLDVW" form="zmm, zmm, zmm" xed="VPSHLDVW_ZMMu16_MASKmskw_ZMMu16_ZMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_shldv_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI16"/>
+ <parameter type="__m256i" varname="b" etype="UI16"/>
+ <parameter type="__m256i" varname="c" etype="UI16"/>
+ <description>Concatenate packed 16-bit integers in "a" and "b" producing an intermediate 32-bit result. Shift the result left by the amount specified in the corresponding element of "c", and store the upper 16-bits in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ IF k[j]
+ tmp[31:0] := ((a[i+15:i] &lt;&lt; 16)[31:0] | b[i+15:i]) &lt;&lt; (c[i+15:i] &amp; 15)
+ dst[i+15:i] := tmp[31:16]
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSHLDVW" form="ymm {z}, ymm, ymm" xed="VPSHLDVW_YMMu16_MASKmskw_YMMu16_YMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_shldv_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__m256i" varname="a" etype="UI16"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="b" etype="UI16"/>
+ <parameter type="__m256i" varname="c" etype="UI16"/>
+ <description>Concatenate packed 16-bit integers in "a" and "b" producing an intermediate 32-bit result. Shift the result left by the amount specified in the corresponding element of "c", and store the upper 16-bits in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ IF k[j]
+ tmp[31:0] := ((a[i+15:i] &lt;&lt; 16)[31:0] | b[i+15:i]) &lt;&lt; (c[i+15:i] &amp; 15)
+ dst[i+15:i] := tmp[31:16]
+ ELSE
+ dst[i+15:i] := a[i+15:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSHLDVW" form="ymm {k}, ymm, ymm" xed="VPSHLDVW_YMMu16_MASKmskw_YMMu16_YMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_shldv_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__m256i" varname="a" etype="UI16"/>
+ <parameter type="__m256i" varname="b" etype="UI16"/>
+ <parameter type="__m256i" varname="c" etype="UI16"/>
+ <description>Concatenate packed 16-bit integers in "a" and "b" producing an intermediate 32-bit result. Shift the result left by the amount specified in the corresponding element of "c", and store the upper 16-bits in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ tmp[31:0] := ((a[i+15:i] &lt;&lt; 16)[31:0] | b[i+15:i]) &lt;&lt; (c[i+15:i] &amp; 15)
+ dst[i+15:i] := tmp[31:16]
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSHLDVW" form="ymm, ymm, ymm" xed="VPSHLDVW_YMMu16_MASKmskw_YMMu16_YMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_shldv_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <parameter type="__m128i" varname="b" etype="UI16"/>
+ <parameter type="__m128i" varname="c" etype="UI16"/>
+ <description>Concatenate packed 16-bit integers in "a" and "b" producing an intermediate 32-bit result. Shift the result left by the amount specified in the corresponding element of "c", and store the upper 16-bits in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ IF k[j]
+ tmp[31:0] := ((a[i+15:i] &lt;&lt; 16)[31:0] | b[i+15:i]) &lt;&lt; (c[i+15:i] &amp; 15)
+ dst[i+15:i] := tmp[31:16]
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPSHLDVW" form="xmm {z}, xmm, xmm" xed="VPSHLDVW_XMMu16_MASKmskw_XMMu16_XMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_shldv_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="b" etype="UI16"/>
+ <parameter type="__m128i" varname="c" etype="UI16"/>
+ <description>Concatenate packed 16-bit integers in "a" and "b" producing an intermediate 32-bit result. Shift the result left by the amount specified in the corresponding element of "c", and store the upper 16-bits in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ IF k[j]
+ tmp[31:0] := ((a[i+15:i] &lt;&lt; 16)[31:0] | b[i+15:i]) &lt;&lt; (c[i+15:i] &amp; 15)
+ dst[i+15:i] := tmp[31:16]
+ ELSE
+ dst[i+15:i] := a[i+15:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPSHLDVW" form="xmm {k}, xmm, xmm" xed="VPSHLDVW_XMMu16_MASKmskw_XMMu16_XMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_shldv_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <parameter type="__m128i" varname="b" etype="UI16"/>
+ <parameter type="__m128i" varname="c" etype="UI16"/>
+ <description>Concatenate packed 16-bit integers in "a" and "b" producing an intermediate 32-bit result. Shift the result left by the amount specified in the corresponding element of "c", and store the upper 16-bits in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ tmp[31:0] := ((a[i+15:i] &lt;&lt; 16)[31:0] | b[i+15:i]) &lt;&lt; (c[i+15:i] &amp; 15)
+ dst[i+15:i] := tmp[31:16]
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPSHLDVW" form="xmm, xmm, xmm" xed="VPSHLDVW_XMMu16_MASKmskw_XMMu16_XMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_shldi_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="__m512i" varname="b" etype="UI64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Concatenate packed 64-bit integers in "a" and "b" producing an intermediate 128-bit result. Shift the result left by "imm8" bits, and store the upper 64-bits in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ tmp[127:0] := ((a[i+63:i] &lt;&lt; 64)[127:0] | b[i+63:i]) &lt;&lt; imm8[5:0]
+ dst[i+63:i] := tmp[127:64]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSHLDQ" form="zmm {z}, zmm, zmm, imm8" xed="VPSHLDQ_ZMMu64_MASKmskw_ZMMu64_ZMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_shldi_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="__m512i" varname="b" etype="UI64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Concatenate packed 64-bit integers in "a" and "b" producing an intermediate 128-bit result. Shift the result left by "imm8" bits, and store the upper 64-bits in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ tmp[127:0] := ((a[i+63:i] &lt;&lt; 64)[127:0] | b[i+63:i]) &lt;&lt; imm8[5:0]
+ dst[i+63:i] := tmp[127:64]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSHLDQ" form="zmm {k}, zmm, zmm, imm8" xed="VPSHLDQ_ZMMu64_MASKmskw_ZMMu64_ZMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_shldi_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="__m512i" varname="b" etype="UI64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+	<description>Concatenate packed 64-bit integers in "a" and "b" producing an intermediate 128-bit result. Shift the result left by "imm8" bits, and store the upper 64-bits in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ tmp[127:0] := ((a[i+63:i] &lt;&lt; 64)[127:0] | b[i+63:i]) &lt;&lt; imm8[5:0]
+ dst[i+63:i] := tmp[127:64]
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSHLDQ" form="zmm, zmm, zmm, imm8" xed="VPSHLDQ_ZMMu64_MASKmskw_ZMMu64_ZMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_shldi_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="__m256i" varname="b" etype="UI64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Concatenate packed 64-bit integers in "a" and "b" producing an intermediate 128-bit result. Shift the result left by "imm8" bits, and store the upper 64-bits in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ tmp[127:0] := ((a[i+63:i] &lt;&lt; 64)[127:0] | b[i+63:i]) &lt;&lt; imm8[5:0]
+ dst[i+63:i] := tmp[127:64]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSHLDQ" form="ymm {z}, ymm, ymm, imm8" xed="VPSHLDQ_YMMu64_MASKmskw_YMMu64_YMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_shldi_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m256i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="__m256i" varname="b" etype="UI64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Concatenate packed 64-bit integers in "a" and "b" producing an intermediate 128-bit result. Shift the result left by "imm8" bits, and store the upper 64-bits in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF k[j]
+ tmp[127:0] := ((a[i+63:i] &lt;&lt; 64)[127:0] | b[i+63:i]) &lt;&lt; imm8[5:0]
+ dst[i+63:i] := tmp[127:64]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSHLDQ" form="ymm {k}, ymm, ymm, imm8" xed="VPSHLDQ_YMMu64_MASKmskw_YMMu64_YMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_shldi_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="__m256i" varname="b" etype="UI64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+	<description>Concatenate packed 64-bit integers in "a" and "b" producing an intermediate 128-bit result. Shift the result left by "imm8" bits, and store the upper 64-bits in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ tmp[127:0] := ((a[i+63:i] &lt;&lt; 64)[127:0] | b[i+63:i]) &lt;&lt; imm8[5:0]
+ dst[i+63:i] := tmp[127:64]
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSHLDQ" form="ymm, ymm, ymm, imm8" xed="VPSHLDQ_YMMu64_MASKmskw_YMMu64_YMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_shldi_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <parameter type="__m128i" varname="b" etype="UI64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Concatenate packed 64-bit integers in "a" and "b" producing an intermediate 128-bit result. Shift the result left by "imm8" bits, and store the upper 64-bits in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ tmp[127:0] := ((a[i+63:i] &lt;&lt; 64)[127:0] | b[i+63:i]) &lt;&lt; imm8[5:0]
+ dst[i+63:i] := tmp[127:64]
+ ELSE
+ dst[i+63:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPSHLDQ" form="xmm {z}, xmm, xmm, imm8" xed="VPSHLDQ_XMMu64_MASKmskw_XMMu64_XMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_shldi_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m128i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <parameter type="__m128i" varname="b" etype="UI64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Concatenate packed 64-bit integers in "a" and "b" producing an intermediate 128-bit result. Shift the result left by "imm8" bits, and store the upper 64-bits in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF k[j]
+ tmp[127:0] := ((a[i+63:i] &lt;&lt; 64)[127:0] | b[i+63:i]) &lt;&lt; imm8[5:0]
+ dst[i+63:i] := tmp[127:64]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPSHLDQ" form="xmm {k}, xmm, xmm, imm8" xed="VPSHLDQ_XMMu64_MASKmskw_XMMu64_XMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_shldi_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <parameter type="__m128i" varname="b" etype="UI64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+	<description>Concatenate packed 64-bit integers in "a" and "b" producing an intermediate 128-bit result. Shift the result left by "imm8" bits, and store the upper 64-bits in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ tmp[127:0] := ((a[i+63:i] &lt;&lt; 64)[127:0] | b[i+63:i]) &lt;&lt; imm8[5:0]
+ dst[i+63:i] := tmp[127:64]
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPSHLDQ" form="xmm, xmm, xmm, imm8" xed="VPSHLDQ_XMMu64_MASKmskw_XMMu64_XMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_shldi_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="__m512i" varname="b" etype="UI32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Concatenate packed 32-bit integers in "a" and "b" producing an intermediate 64-bit result. Shift the result left by "imm8" bits, and store the upper 32-bits in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ tmp[63:0] := ((a[i+31:i] &lt;&lt; 32)[63:0] | b[i+31:i]) &lt;&lt; imm8[4:0]
+ dst[i+31:i] := tmp[63:32]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSHLDD" form="zmm {z}, zmm, zmm, imm8" xed="VPSHLDD_ZMMu32_MASKmskw_ZMMu32_ZMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_shldi_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="src" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="__m512i" varname="b" etype="UI32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Concatenate packed 32-bit integers in "a" and "b" producing an intermediate 64-bit result. Shift the result left by "imm8" bits, and store the upper 32-bits in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ tmp[63:0] := ((a[i+31:i] &lt;&lt; 32)[63:0] | b[i+31:i]) &lt;&lt; imm8[4:0]
+ dst[i+31:i] := tmp[63:32]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSHLDD" form="zmm {k}, zmm, zmm, imm8" xed="VPSHLDD_ZMMu32_MASKmskw_ZMMu32_ZMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_shldi_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="__m512i" varname="b" etype="UI32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Concatenate packed 32-bit integers in "a" and "b" producing an intermediate 64-bit result. Shift the result left by "imm8" bits, and store the upper 32-bits in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ tmp[63:0] := ((a[i+31:i] &lt;&lt; 32)[63:0] | b[i+31:i]) &lt;&lt; imm8[4:0]
+ dst[i+31:i] := tmp[63:32]
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSHLDD" form="zmm, zmm, zmm, imm8" xed="VPSHLDD_ZMMu32_MASKmskw_ZMMu32_ZMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_shldi_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="__m256i" varname="b" etype="UI32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Concatenate packed 32-bit integers in "a" and "b" producing an intermediate 64-bit result. Shift the result left by "imm8" bits, and store the upper 32-bits in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ tmp[63:0] := ((a[i+31:i] &lt;&lt; 32)[63:0] | b[i+31:i]) &lt;&lt; imm8[4:0]
+ dst[i+31:i] := tmp[63:32]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSHLDD" form="ymm {z}, ymm, ymm, imm8" xed="VPSHLDD_YMMu32_MASKmskw_YMMu32_YMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_shldi_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="__m256i" varname="b" etype="UI32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Concatenate packed 32-bit integers in "a" and "b" producing an intermediate 64-bit result. Shift the result left by "imm8" bits, and store the upper 32-bits in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF k[j]
+ tmp[63:0] := ((a[i+31:i] &lt;&lt; 32)[63:0] | b[i+31:i]) &lt;&lt; imm8[4:0]
+ dst[i+31:i] := tmp[63:32]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSHLDD" form="ymm {k}, ymm, ymm, imm8" xed="VPSHLDD_YMMu32_MASKmskw_YMMu32_YMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_shldi_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI32"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="__m256i" varname="b" etype="UI32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Concatenate packed 32-bit integers in "a" and "b" producing an intermediate 64-bit result. Shift the result left by "imm8" bits, and store the upper 32-bits in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ tmp[63:0] := ((a[i+31:i] &lt;&lt; 32)[63:0] | b[i+31:i]) &lt;&lt; imm8[4:0]
+ dst[i+31:i] := tmp[63:32]
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSHLDD" form="ymm, ymm, ymm, imm8" xed="VPSHLDD_YMMu32_MASKmskw_YMMu32_YMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_shldi_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="b" etype="UI32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Concatenate packed 32-bit integers in "a" and "b" producing an intermediate 64-bit result. Shift the result left by "imm8" bits, and store the upper 32-bits in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ tmp[63:0] := ((a[i+31:i] &lt;&lt; 32)[63:0] | b[i+31:i]) &lt;&lt; imm8[4:0]
+ dst[i+31:i] := tmp[63:32]
+ ELSE
+ dst[i+31:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPSHLDD" form="xmm {z}, xmm, xmm, imm8" xed="VPSHLDD_XMMu32_MASKmskw_XMMu32_XMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_shldi_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="b" etype="UI32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Concatenate packed 32-bit integers in "a" and "b" producing an intermediate 64-bit result. Shift the result left by "imm8" bits, and store the upper 32-bits in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF k[j]
+ tmp[63:0] := ((a[i+31:i] &lt;&lt; 32)[63:0] | b[i+31:i]) &lt;&lt; imm8[4:0]
+ dst[i+31:i] := tmp[63:32]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPSHLDD" form="xmm {k}, xmm, xmm, imm8" xed="VPSHLDD_XMMu32_MASKmskw_XMMu32_XMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_shldi_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="b" etype="UI32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Concatenate packed 32-bit integers in "a" and "b" producing an intermediate 64-bit result. Shift the result left by "imm8" bits, and store the upper 32-bits in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ tmp[63:0] := ((a[i+31:i] &lt;&lt; 32)[63:0] | b[i+31:i]) &lt;&lt; imm8[4:0]
+ dst[i+31:i] := tmp[63:32]
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPSHLDD" form="xmm, xmm, xmm, imm8" xed="VPSHLDD_XMMu32_MASKmskw_XMMu32_XMMu32_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_shldi_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI16"/>
+ <parameter type="__m512i" varname="b" etype="UI16"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Concatenate packed 16-bit integers in "a" and "b" producing an intermediate 32-bit result. Shift the result left by "imm8" bits, and store the upper 16-bits in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ IF k[j]
+ tmp[31:0] := ((a[i+15:i] &lt;&lt; 16)[31:0] | b[i+15:i]) &lt;&lt; imm8[3:0]
+ dst[i+15:i] := tmp[31:16]
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSHLDW" form="zmm {z}, zmm, zmm, imm8" xed="VPSHLDW_ZMMu16_MASKmskw_ZMMu16_ZMMu16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_shldi_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__m512i" varname="src" etype="UI16"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI16"/>
+ <parameter type="__m512i" varname="b" etype="UI16"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Concatenate packed 16-bit integers in "a" and "b" producing an intermediate 32-bit result. Shift the result left by "imm8" bits, and store the upper 16-bits in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ IF k[j]
+ tmp[31:0] := ((a[i+15:i] &lt;&lt; 16)[31:0] | b[i+15:i]) &lt;&lt; imm8[3:0]
+ dst[i+15:i] := tmp[31:16]
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSHLDW" form="zmm {k}, zmm, zmm, imm8" xed="VPSHLDW_ZMMu16_MASKmskw_ZMMu16_ZMMu16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_shldi_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <category>Shift</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__m512i" varname="a" etype="UI16"/>
+ <parameter type="__m512i" varname="b" etype="UI16"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Concatenate packed 16-bit integers in "a" and "b" producing an intermediate 32-bit result. Shift the result left by "imm8" bits, and store the upper 16-bits in "dst".</description>
+ <operation>
+FOR j := 0 to 31
+ i := j*16
+ tmp[31:0] := ((a[i+15:i] &lt;&lt; 16)[31:0] | b[i+15:i]) &lt;&lt; imm8[3:0]
+ dst[i+15:i] := tmp[31:16]
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSHLDW" form="zmm, zmm, zmm, imm8" xed="VPSHLDW_ZMMu16_MASKmskw_ZMMu16_ZMMu16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_shldi_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI16"/>
+ <parameter type="__m256i" varname="b" etype="UI16"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Concatenate packed 16-bit integers in "a" and "b" producing an intermediate 32-bit result. Shift the result left by "imm8" bits, and store the upper 16-bits in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ IF k[j]
+ tmp[31:0] := ((a[i+15:i] &lt;&lt; 16)[31:0] | b[i+15:i]) &lt;&lt; imm8[3:0]
+ dst[i+15:i] := tmp[31:16]
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSHLDW" form="ymm {z}, ymm, ymm, imm8" xed="VPSHLDW_YMMu16_MASKmskw_YMMu16_YMMu16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_shldi_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__m256i" varname="src" etype="UI16"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI16"/>
+ <parameter type="__m256i" varname="b" etype="UI16"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Concatenate packed 16-bit integers in "a" and "b" producing an intermediate 32-bit result. Shift the result left by "imm8" bits, and store the upper 16-bits in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ IF k[j]
+ tmp[31:0] := ((a[i+15:i] &lt;&lt; 16)[31:0] | b[i+15:i]) &lt;&lt; imm8[3:0]
+ dst[i+15:i] := tmp[31:16]
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSHLDW" form="ymm {k}, ymm, ymm, imm8" xed="VPSHLDW_YMMu16_MASKmskw_YMMu16_YMMu16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_shldi_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Shift</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__m256i" varname="a" etype="UI16"/>
+ <parameter type="__m256i" varname="b" etype="UI16"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Concatenate packed 16-bit integers in "a" and "b" producing an intermediate 32-bit result. Shift the result left by "imm8" bits, and store the upper 16-bits in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*16
+ tmp[31:0] := ((a[i+15:i] &lt;&lt; 16)[31:0] | b[i+15:i]) &lt;&lt; imm8[3:0]
+ dst[i+15:i] := tmp[31:16]
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPSHLDW" form="ymm, ymm, ymm, imm8" xed="VPSHLDW_YMMu16_MASKmskw_YMMu16_YMMu16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_shldi_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <parameter type="__m128i" varname="b" etype="UI16"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Concatenate packed 16-bit integers in "a" and "b" producing an intermediate 32-bit result. Shift the result left by "imm8" bits, and store the upper 16-bits in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ IF k[j]
+ tmp[31:0] := ((a[i+15:i] &lt;&lt; 16)[31:0] | b[i+15:i]) &lt;&lt; imm8[3:0]
+ dst[i+15:i] := tmp[31:16]
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPSHLDW" form="xmm {z}, xmm, xmm, imm8" xed="VPSHLDW_XMMu16_MASKmskw_XMMu16_XMMu16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_shldi_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__m128i" varname="src" etype="UI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <parameter type="__m128i" varname="b" etype="UI16"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Concatenate packed 16-bit integers in "a" and "b" producing an intermediate 32-bit result. Shift the result left by "imm8" bits, and store the upper 16-bits in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ IF k[j]
+ tmp[31:0] := ((a[i+15:i] &lt;&lt; 16)[31:0] | b[i+15:i]) &lt;&lt; imm8[3:0]
+ dst[i+15:i] := tmp[31:16]
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPSHLDW" form="xmm {k}, xmm, xmm, imm8" xed="VPSHLDW_XMMu16_MASKmskw_XMMu16_XMMu16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_shldi_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <parameter type="__m128i" varname="b" etype="UI16"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Concatenate packed 16-bit integers in "a" and "b" producing an intermediate 32-bit result. Shift the result left by "imm8" bits, and store the upper 16-bits in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ tmp[31:0] := ((a[i+15:i] &lt;&lt; 16)[31:0] | b[i+15:i]) &lt;&lt; imm8[3:0]
+ dst[i+15:i] := tmp[31:16]
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPSHLDW" form="xmm, xmm, xmm, imm8" xed="VPSHLDW_XMMu16_MASKmskw_XMMu16_XMMu16_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_expandloadu_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <category>Load</category>
+ <category>Swizzle</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="const void*" varname="mem_addr" etype="UI16" memwidth="512"/>
+ <description>Load contiguous active 16-bit integers from unaligned memory at "mem_addr" (those with their respective bit set in mask "k"), and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+m := 0
+FOR j := 0 to 31
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := MEM[mem_addr+m+15:mem_addr+m]
+ m := m + 16
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPEXPANDW" form="zmm {z}, m512" xed="VPEXPANDW_ZMMu16_MASKmskw_MEMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_expandloadu_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <category>Load</category>
+ <category>Swizzle</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__m512i" varname="src" etype="UI16"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="const void*" varname="mem_addr" etype="UI16" memwidth="512"/>
+ <description>Load contiguous active 16-bit integers from unaligned memory at "mem_addr" (those with their respective bit set in mask "k"), and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+m := 0
+FOR j := 0 to 31
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := MEM[mem_addr+m+15:mem_addr+m]
+ m := m + 16
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPEXPANDW" form="zmm {k}, m512" xed="VPEXPANDW_ZMMu16_MASKmskw_MEMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_expand_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI16"/>
+ <description>Load contiguous active 16-bit integers from "a" (those with their respective bit set in mask "k"), and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+m := 0
+FOR j := 0 to 31
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := a[m+15:m]
+ m := m + 16
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPEXPANDW" form="zmm {z}, zmm" xed="VPEXPANDW_ZMMu16_MASKmskw_ZMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_expand_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__m512i" varname="src" etype="UI16"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI16"/>
+ <description>Load contiguous active 16-bit integers from "a" (those with their respective bit set in mask "k"), and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+m := 0
+FOR j := 0 to 31
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := a[m+15:m]
+ m := m + 16
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPEXPANDW" form="zmm {k}, zmm" xed="VPEXPANDW_ZMMu16_MASKmskw_ZMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_expandloadu_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Load</category>
+ <category>Swizzle</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="const void*" varname="mem_addr" etype="UI16" memwidth="256"/>
+ <description>Load contiguous active 16-bit integers from unaligned memory at "mem_addr" (those with their respective bit set in mask "k"), and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+m := 0
+FOR j := 0 to 15
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := MEM[mem_addr+m+15:mem_addr+m]
+ m := m + 16
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPEXPANDW" form="ymm {z}, m256" xed="VPEXPANDW_YMMu16_MASKmskw_MEMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_expandloadu_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Load</category>
+ <category>Swizzle</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__m256i" varname="src" etype="UI16"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="const void*" varname="mem_addr" etype="UI16" memwidth="256"/>
+ <description>Load contiguous active 16-bit integers from unaligned memory at "mem_addr" (those with their respective bit set in mask "k"), and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+m := 0
+FOR j := 0 to 15
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := MEM[mem_addr+m+15:mem_addr+m]
+ m := m + 16
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPEXPANDW" form="ymm {k}, m256" xed="VPEXPANDW_YMMu16_MASKmskw_MEMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_expand_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Swizzle</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI16"/>
+ <description>Load contiguous active 16-bit integers from "a" (those with their respective bit set in mask "k"), and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+m := 0
+FOR j := 0 to 15
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := a[m+15:m]
+ m := m + 16
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPEXPANDW" form="ymm {z}, ymm" xed="VPEXPANDW_YMMu16_MASKmskw_YMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_expand_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Swizzle</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__m256i" varname="src" etype="UI16"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI16"/>
+ <description>Load contiguous active 16-bit integers from "a" (those with their respective bit set in mask "k"), and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+m := 0
+FOR j := 0 to 15
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := a[m+15:m]
+ m := m + 16
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPEXPANDW" form="ymm {k}, ymm" xed="VPEXPANDW_YMMu16_MASKmskw_YMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_expandloadu_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Load</category>
+ <category>Swizzle</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="const void*" varname="mem_addr" etype="UI16" memwidth="128"/>
+ <description>Load contiguous active 16-bit integers from unaligned memory at "mem_addr" (those with their respective bit set in mask "k"), and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+m := 0
+FOR j := 0 to 7
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := MEM[mem_addr+m+15:mem_addr+m]
+ m := m + 16
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPEXPANDW" form="xmm {z}, m128" xed="VPEXPANDW_XMMu16_MASKmskw_MEMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_expandloadu_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Load</category>
+ <category>Swizzle</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__m128i" varname="src" etype="UI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="const void*" varname="mem_addr" etype="UI16" memwidth="128"/>
+ <description>Load contiguous active 16-bit integers from unaligned memory at "mem_addr" (those with their respective bit set in mask "k"), and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+m := 0
+FOR j := 0 to 7
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := MEM[mem_addr+m+15:mem_addr+m]
+ m := m + 16
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPEXPANDW" form="xmm {k}, m128" xed="VPEXPANDW_XMMu16_MASKmskw_MEMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_expand_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Swizzle</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <description>Load contiguous active 16-bit integers from "a" (those with their respective bit set in mask "k"), and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+m := 0
+FOR j := 0 to 7
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := a[m+15:m]
+ m := m + 16
+ ELSE
+ dst[i+15:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPEXPANDW" form="xmm {z}, xmm" xed="VPEXPANDW_XMMu16_MASKmskw_XMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_expand_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Swizzle</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__m128i" varname="src" etype="UI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <description>Load contiguous active 16-bit integers from "a" (those with their respective bit set in mask "k"), and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+m := 0
+FOR j := 0 to 7
+ i := j*16
+ IF k[j]
+ dst[i+15:i] := a[m+15:m]
+ m := m + 16
+ ELSE
+ dst[i+15:i] := src[i+15:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPEXPANDW" form="xmm {k}, xmm" xed="VPEXPANDW_XMMu16_MASKmskw_XMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_expandloadu_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <category>Load</category>
+ <category>Swizzle</category>
+ <return type="__m512i" varname="dst" etype="UI8"/>
+ <parameter type="__mmask64" varname="k" etype="MASK"/>
+ <parameter type="const void*" varname="mem_addr" etype="UI8" memwidth="512"/>
+ <description>Load contiguous active 8-bit integers from unaligned memory at "mem_addr" (those with their respective bit set in mask "k"), and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+m := 0
+FOR j := 0 to 63
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := MEM[mem_addr+m+7:mem_addr+m]
+ m := m + 8
+ ELSE
+ dst[i+7:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPEXPANDB" form="zmm {z}, m512" xed="VPEXPANDB_ZMMu8_MASKmskw_MEMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_expandloadu_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <category>Load</category>
+ <category>Swizzle</category>
+ <return type="__m512i" varname="dst" etype="UI8"/>
+ <parameter type="__m512i" varname="src" etype="UI8"/>
+ <parameter type="__mmask64" varname="k" etype="MASK"/>
+ <parameter type="const void*" varname="mem_addr" etype="UI8" memwidth="512"/>
+ <description>Load contiguous active 8-bit integers from unaligned memory at "mem_addr" (those with their respective bit set in mask "k"), and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+m := 0
+FOR j := 0 to 63
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := MEM[mem_addr+m+7:mem_addr+m]
+ m := m + 8
+ ELSE
+ dst[i+7:i] := src[i+7:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPEXPANDB" form="zmm {k}, m512" xed="VPEXPANDB_ZMMu8_MASKmskw_MEMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_expandloadu_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Load</category>
+ <category>Swizzle</category>
+ <return type="__m256i" varname="dst" etype="UI8"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="const void*" varname="mem_addr" etype="UI8" memwidth="256"/>
+ <description>Load contiguous active 8-bit integers from unaligned memory at "mem_addr" (those with their respective bit set in mask "k"), and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+m := 0
+FOR j := 0 to 31
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := MEM[mem_addr+m+7:mem_addr+m]
+ m := m + 8
+ ELSE
+ dst[i+7:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPEXPANDB" form="ymm {z}, m256" xed="VPEXPANDB_YMMu8_MASKmskw_MEMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_expandloadu_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Load</category>
+ <category>Swizzle</category>
+ <return type="__m256i" varname="dst" etype="UI8"/>
+ <parameter type="__m256i" varname="src" etype="UI8"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="const void*" varname="mem_addr" etype="UI8" memwidth="256"/>
+ <description>Load contiguous active 8-bit integers from unaligned memory at "mem_addr" (those with their respective bit set in mask "k"), and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+m := 0
+FOR j := 0 to 31
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := MEM[mem_addr+m+7:mem_addr+m]
+ m := m + 8
+ ELSE
+ dst[i+7:i] := src[i+7:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPEXPANDB" form="ymm {k}, m256" xed="VPEXPANDB_YMMu8_MASKmskw_MEMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_expandloadu_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Load</category>
+ <category>Swizzle</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="const void*" varname="mem_addr" etype="UI8" memwidth="128"/>
+ <description>Load contiguous active 8-bit integers from unaligned memory at "mem_addr" (those with their respective bit set in mask "k"), and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+m := 0
+FOR j := 0 to 15
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := MEM[mem_addr+m+7:mem_addr+m]
+ m := m + 8
+ ELSE
+ dst[i+7:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPEXPANDB" form="xmm {z}, m128" xed="VPEXPANDB_XMMu8_MASKmskw_MEMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_expandloadu_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Load</category>
+ <category>Swizzle</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__m128i" varname="src" etype="UI8"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="const void*" varname="mem_addr" etype="UI8" memwidth="128"/>
+ <description>Load contiguous active 8-bit integers from unaligned memory at "mem_addr" (those with their respective bit set in mask "k"), and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+m := 0
+FOR j := 0 to 15
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := MEM[mem_addr+m+7:mem_addr+m]
+ m := m + 8
+ ELSE
+ dst[i+7:i] := src[i+7:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPEXPANDB" form="xmm {k}, m128" xed="VPEXPANDB_XMMu8_MASKmskw_MEMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_expand_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512i" varname="dst" etype="UI8"/>
+ <parameter type="__mmask64" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI8"/>
+ <description>Load contiguous active 8-bit integers from "a" (those with their respective bit set in mask "k"), and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+m := 0
+FOR j := 0 to 63
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := a[m+7:m]
+ m := m + 8
+ ELSE
+ dst[i+7:i] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPEXPANDB" form="zmm {z}, zmm" xed="VPEXPANDB_ZMMu8_MASKmskw_ZMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_expand_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512i" varname="dst" etype="UI8"/>
+ <parameter type="__m512i" varname="src" etype="UI8"/>
+ <parameter type="__mmask64" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI8"/>
+ <description>Load contiguous active 8-bit integers from "a" (those with their respective bit set in mask "k"), and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+m := 0
+FOR j := 0 to 63
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := a[m+7:m]
+ m := m + 8
+ ELSE
+ dst[i+7:i] := src[i+7:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPEXPANDB" form="zmm {k}, zmm" xed="VPEXPANDB_ZMMu8_MASKmskw_ZMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_expand_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Swizzle</category>
+ <return type="__m256i" varname="dst" etype="UI8"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI8"/>
+ <description>Load contiguous active 8-bit integers from "a" (those with their respective bit set in mask "k"), and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+m := 0
+FOR j := 0 to 31
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := a[m+7:m]
+ m := m + 8
+ ELSE
+ dst[i+7:i] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPEXPANDB" form="ymm {z}, ymm" xed="VPEXPANDB_YMMu8_MASKmskw_YMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_expand_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Swizzle</category>
+ <return type="__m256i" varname="dst" etype="UI8"/>
+ <parameter type="__m256i" varname="src" etype="UI8"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI8"/>
+ <description>Load contiguous active 8-bit integers from "a" (those with their respective bit set in mask "k"), and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+m := 0
+FOR j := 0 to 31
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := a[m+7:m]
+ m := m + 8
+ ELSE
+ dst[i+7:i] := src[i+7:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPEXPANDB" form="ymm {k}, ymm" xed="VPEXPANDB_YMMu8_MASKmskw_YMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_expand_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Swizzle</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
+ <description>Load contiguous active 8-bit integers from "a" (those with their respective bit set in mask "k"), and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+m := 0
+FOR j := 0 to 15
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := a[m+7:m]
+ m := m + 8
+ ELSE
+ dst[i+7:i] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPEXPANDB" form="xmm {z}, xmm" xed="VPEXPANDB_XMMu8_MASKmskw_XMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_expand_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Swizzle</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__m128i" varname="src" etype="UI8"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
+ <description>Load contiguous active 8-bit integers from "a" (those with their respective bit set in mask "k"), and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+m := 0
+FOR j := 0 to 15
+ i := j*8
+ IF k[j]
+ dst[i+7:i] := a[m+7:m]
+ m := m + 8
+ ELSE
+ dst[i+7:i] := src[i+7:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPEXPANDB" form="xmm {k}, xmm" xed="VPEXPANDB_XMMu8_MASKmskw_XMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_compressstoreu_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <category>Store</category>
+ <category>Swizzle</category>
+ <return type="void"/>
+ <parameter type="void*" varname="base_addr" etype="UI16" memwidth="512"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI16"/>
+ <description>Contiguously store the active 16-bit integers in "a" (those with their respective bit set in writemask "k") to unaligned memory at "base_addr".</description>
+ <operation>
+size := 16
+m := base_addr
+FOR j := 0 to 31
+ i := j*16
+ IF k[j]
+ MEM[m+size-1:m] := a[i+15:i]
+ m := m + size
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VPCOMPRESSW" form="m512 {k}, zmm" xed="VPCOMPRESSW_MEMu16_MASKmskw_ZMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_compressstoreu_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Store</category>
+ <category>Swizzle</category>
+ <return type="void"/>
+ <parameter type="void*" varname="base_addr" etype="UI16" memwidth="256"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI16"/>
+ <description>Contiguously store the active 16-bit integers in "a" (those with their respective bit set in writemask "k") to unaligned memory at "base_addr".</description>
+ <operation>
+size := 16
+m := base_addr
+FOR j := 0 to 15
+ i := j*16
+ IF k[j]
+ MEM[m+size-1:m] := a[i+15:i]
+ m := m + size
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VPCOMPRESSW" form="m256 {k}, ymm" xed="VPCOMPRESSW_MEMu16_MASKmskw_YMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_compressstoreu_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Store</category>
+ <category>Swizzle</category>
+ <return type="void"/>
+ <parameter type="void*" varname="base_addr" etype="UI16" memwidth="128"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <description>Contiguously store the active 16-bit integers in "a" (those with their respective bit set in writemask "k") to unaligned memory at "base_addr".</description>
+ <operation>
+size := 16
+m := base_addr
+FOR j := 0 to 7
+ i := j*16
+ IF k[j]
+ MEM[m+size-1:m] := a[i+15:i]
+ m := m + size
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VPCOMPRESSW" form="m128 {k}, xmm" xed="VPCOMPRESSW_MEMu16_MASKmskw_XMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_compress_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI16"/>
+ <description>Contiguously store the active 16-bit integers in "a" (those with their respective bit set in zeromask "k") to "dst", and set the remaining elements to zero.</description>
+ <operation>
+size := 16
+m := 0
+FOR j := 0 to 31
+ i := j*16
+ IF k[j]
+ dst[m+size-1:m] := a[i+15:i]
+ m := m + size
+ FI
+ENDFOR
+dst[511:m] := 0
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPCOMPRESSW" form="zmm {z}, zmm" xed="VPCOMPRESSW_ZMMu16_MASKmskw_ZMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_compress_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512i" varname="dst" etype="UI16"/>
+ <parameter type="__m512i" varname="src" etype="UI16"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI16"/>
+ <description>Contiguously store the active 16-bit integers in "a" (those with their respective bit set in writemask "k") to "dst", and pass through the remaining elements from "src".</description>
+ <operation>
+size := 16
+m := 0
+FOR j := 0 to 31
+ i := j*16
+ IF k[j]
+ dst[m+size-1:m] := a[i+15:i]
+ m := m + size
+ FI
+ENDFOR
+dst[511:m] := src[511:m]
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPCOMPRESSW" form="zmm {k}, zmm" xed="VPCOMPRESSW_ZMMu16_MASKmskw_ZMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_compress_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Swizzle</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI16"/>
+ <description>Contiguously store the active 16-bit integers in "a" (those with their respective bit set in zeromask "k") to "dst", and set the remaining elements to zero.</description>
+ <operation>
+size := 16
+m := 0
+FOR j := 0 to 15
+ i := j*16
+ IF k[j]
+ dst[m+size-1:m] := a[i+15:i]
+ m := m + size
+ FI
+ENDFOR
+dst[255:m] := 0
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPCOMPRESSW" form="ymm {z}, ymm" xed="VPCOMPRESSW_YMMu16_MASKmskw_YMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_compress_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Swizzle</category>
+ <return type="__m256i" varname="dst" etype="UI16"/>
+ <parameter type="__m256i" varname="src" etype="UI16"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI16"/>
+ <description>Contiguously store the active 16-bit integers in "a" (those with their respective bit set in writemask "k") to "dst", and pass through the remaining elements from "src".</description>
+ <operation>
+size := 16
+m := 0
+FOR j := 0 to 15
+ i := j*16
+ IF k[j]
+ dst[m+size-1:m] := a[i+15:i]
+ m := m + size
+ FI
+ENDFOR
+dst[255:m] := src[255:m]
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPCOMPRESSW" form="ymm {k}, ymm" xed="VPCOMPRESSW_YMMu16_MASKmskw_YMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_compress_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Swizzle</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <description>Contiguously store the active 16-bit integers in "a" (those with their respective bit set in zeromask "k") to "dst", and set the remaining elements to zero.</description>
+ <operation>
+size := 16
+m := 0
+FOR j := 0 to 7
+ i := j*16
+ IF k[j]
+ dst[m+size-1:m] := a[i+15:i]
+ m := m + size
+ FI
+ENDFOR
+dst[127:m] := 0
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPCOMPRESSW" form="xmm {z}, xmm" xed="VPCOMPRESSW_XMMu16_MASKmskw_XMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_compress_epi16">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Swizzle</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__m128i" varname="src" etype="UI16"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <description>Contiguously store the active 16-bit integers in "a" (those with their respective bit set in writemask "k") to "dst", and pass through the remaining elements from "src".</description>
+ <operation>
+size := 16
+m := 0
+FOR j := 0 to 7
+ i := j*16
+ IF k[j]
+ dst[m+size-1:m] := a[i+15:i]
+ m := m + size
+ FI
+ENDFOR
+dst[127:m] := src[127:m]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPCOMPRESSW" form="xmm {k}, xmm" xed="VPCOMPRESSW_XMMu16_MASKmskw_XMMu16_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_compressstoreu_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <category>Store</category>
+ <category>Swizzle</category>
+ <return type="void"/>
+ <parameter type="void*" varname="base_addr" etype="UI8" memwidth="512"/>
+ <parameter type="__mmask64" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI8"/>
+ <description>Contiguously store the active 8-bit integers in "a" (those with their respective bit set in writemask "k") to unaligned memory at "base_addr".</description>
+ <operation>
+size := 8
+m := base_addr
+FOR j := 0 to 63
+ i := j*8
+ IF k[j]
+ MEM[m+size-1:m] := a[i+7:i]
+ m := m + size
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VPCOMPRESSB" form="m512 {k}, zmm" xed="VPCOMPRESSB_MEMu8_MASKmskw_ZMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_compressstoreu_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Store</category>
+ <category>Swizzle</category>
+ <return type="void"/>
+ <parameter type="void*" varname="base_addr" etype="UI8" memwidth="256"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI8"/>
+ <description>Contiguously store the active 8-bit integers in "a" (those with their respective bit set in writemask "k") to unaligned memory at "base_addr".</description>
+ <operation>
+size := 8
+m := base_addr
+FOR j := 0 to 31
+ i := j*8
+ IF k[j]
+ MEM[m+size-1:m] := a[i+7:i]
+ m := m + size
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VPCOMPRESSB" form="m256 {k}, ymm" xed="VPCOMPRESSB_MEMu8_MASKmskw_YMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_compressstoreu_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Store</category>
+ <category>Swizzle</category>
+ <return type="void"/>
+ <parameter type="void*" varname="base_addr" etype="UI8" memwidth="128"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
+ <description>Contiguously store the active 8-bit integers in "a" (those with their respective bit set in writemask "k") to unaligned memory at "base_addr".</description>
+ <operation>
+size := 8
+m := base_addr
+FOR j := 0 to 15
+ i := j*8
+ IF k[j]
+ MEM[m+size-1:m] := a[i+7:i]
+ m := m + size
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VPCOMPRESSB" form="m128 {k}, xmm" xed="VPCOMPRESSB_MEMu8_MASKmskw_XMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_compress_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512i" varname="dst" etype="UI8"/>
+ <parameter type="__mmask64" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI8"/>
+ <description>Contiguously store the active 8-bit integers in "a" (those with their respective bit set in zeromask "k") to "dst", and set the remaining elements to zero.</description>
+ <operation>
+size := 8
+m := 0
+FOR j := 0 to 63
+ i := j*8
+ IF k[j]
+ dst[m+size-1:m] := a[i+7:i]
+ m := m + size
+ FI
+ENDFOR
+dst[511:m] := 0
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPCOMPRESSB" form="zmm {z}, zmm" xed="VPCOMPRESSB_ZMMu8_MASKmskw_ZMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_compress_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512i" varname="dst" etype="UI8"/>
+ <parameter type="__m512i" varname="src" etype="UI8"/>
+ <parameter type="__mmask64" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI8"/>
+ <description>Contiguously store the active 8-bit integers in "a" (those with their respective bit set in writemask "k") to "dst", and pass through the remaining elements from "src".</description>
+ <operation>
+size := 8
+m := 0
+FOR j := 0 to 63
+ i := j*8
+ IF k[j]
+ dst[m+size-1:m] := a[i+7:i]
+ m := m + size
+ FI
+ENDFOR
+dst[511:m] := src[511:m]
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPCOMPRESSB" form="zmm {k}, zmm" xed="VPCOMPRESSB_ZMMu8_MASKmskw_ZMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_compress_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Swizzle</category>
+ <return type="__m256i" varname="dst" etype="UI8"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI8"/>
+ <description>Contiguously store the active 8-bit integers in "a" (those with their respective bit set in zeromask "k") to "dst", and set the remaining elements to zero.</description>
+ <operation>
+size := 8
+m := 0
+FOR j := 0 to 31
+ i := j*8
+ IF k[j]
+ dst[m+size-1:m] := a[i+7:i]
+ m := m + size
+ FI
+ENDFOR
+dst[255:m] := 0
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPCOMPRESSB" form="ymm {z}, ymm" xed="VPCOMPRESSB_YMMu8_MASKmskw_YMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_compress_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Swizzle</category>
+ <return type="__m256i" varname="dst" etype="UI8"/>
+ <parameter type="__m256i" varname="src" etype="UI8"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI8"/>
+ <description>Contiguously store the active 8-bit integers in "a" (those with their respective bit set in writemask "k") to "dst", and pass through the remaining elements from "src".</description>
+ <operation>
+size := 8
+m := 0
+FOR j := 0 to 31
+ i := j*8
+ IF k[j]
+ dst[m+size-1:m] := a[i+7:i]
+ m := m + size
+ FI
+ENDFOR
+dst[255:m] := src[255:m]
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPCOMPRESSB" form="ymm {k}, ymm" xed="VPCOMPRESSB_YMMu8_MASKmskw_YMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_compress_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Swizzle</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
+ <description>Contiguously store the active 8-bit integers in "a" (those with their respective bit set in zeromask "k") to "dst", and set the remaining elements to zero.</description>
+ <operation>
+size := 8
+m := 0
+FOR j := 0 to 15
+ i := j*8
+ IF k[j]
+ dst[m+size-1:m] := a[i+7:i]
+ m := m + size
+ FI
+ENDFOR
+dst[127:m] := 0
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPCOMPRESSB" form="xmm {z}, xmm" xed="VPCOMPRESSB_XMMu8_MASKmskw_XMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_compress_epi8">
+ <type>Integer</type>
+ <CPUID>AVX512_VBMI2</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Swizzle</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__m128i" varname="src" etype="UI8"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
+ <description>Contiguously store the active 8-bit integers in "a" (those with their respective bit set in writemask "k") to "dst", and pass through the remaining elements from "src".</description>
+ <operation>
+size := 8
+m := 0
+FOR j := 0 to 15
+ i := j*8
+ IF k[j]
+ dst[m+size-1:m] := a[i+7:i]
+ m := m + size
+ FI
+ENDFOR
+dst[127:m] := src[127:m]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPCOMPRESSB" form="xmm {k}, xmm" xed="VPCOMPRESSB_XMMu8_MASKmskw_XMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_dpwssds_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512_VNNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="SI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="src" etype="SI32"/>
+ <parameter type="__m512i" varname="a" etype="SI16"/>
+ <parameter type="__m512i" varname="b" etype="SI16"/>
+ <description>Multiply groups of 2 adjacent pairs of signed 16-bit integers in "a" with corresponding 16-bit integers in "b", producing 2 intermediate signed 32-bit results. Sum these 2 results with the corresponding 32-bit integer in "src" using signed saturation, and store the packed 32-bit results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ IF k[j]
+ tmp1.dword := SignExtend32(a.word[2*j]) * SignExtend32(b.word[2*j])
+ tmp2.dword := SignExtend32(a.word[2*j+1]) * SignExtend32(b.word[2*j+1])
+ dst.dword[j] := Saturate32(src.dword[j] + tmp1 + tmp2)
+ ELSE
+ dst.dword[j] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPDPWSSDS" form="zmm {z}, zmm, zmm" xed="VPDPWSSDS_ZMMi32_MASKmskw_ZMMi16_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_dpwssds_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512_VNNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="SI32"/>
+ <parameter type="__m512i" varname="src" etype="SI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI16"/>
+ <parameter type="__m512i" varname="b" etype="SI16"/>
+ <description>Multiply groups of 2 adjacent pairs of signed 16-bit integers in "a" with corresponding 16-bit integers in "b", producing 2 intermediate signed 32-bit results. Sum these 2 results with the corresponding 32-bit integer in "src" using signed saturation, and store the packed 32-bit results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ IF k[j]
+ tmp1.dword := SignExtend32(a.word[2*j]) * SignExtend32(b.word[2*j])
+ tmp2.dword := SignExtend32(a.word[2*j+1]) * SignExtend32(b.word[2*j+1])
+ dst.dword[j] := Saturate32(src.dword[j] + tmp1 + tmp2)
+ ELSE
+ dst.dword[j] := src.dword[j]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPDPWSSDS" form="zmm {k}, zmm, zmm" xed="VPDPWSSDS_ZMMi32_MASKmskw_ZMMi16_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_dpwssds_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512_VNNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="SI32"/>
+ <parameter type="__m512i" varname="src" etype="SI32"/>
+ <parameter type="__m512i" varname="a" etype="SI16"/>
+ <parameter type="__m512i" varname="b" etype="SI16"/>
+ <description>Multiply groups of 2 adjacent pairs of signed 16-bit integers in "a" with corresponding 16-bit integers in "b", producing 2 intermediate signed 32-bit results. Sum these 2 results with the corresponding 32-bit integer in "src" using signed saturation, and store the packed 32-bit results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ tmp1.dword := SignExtend32(a.word[2*j]) * SignExtend32(b.word[2*j])
+ tmp2.dword := SignExtend32(a.word[2*j+1]) * SignExtend32(b.word[2*j+1])
+ dst.dword[j] := Saturate32(src.dword[j] + tmp1 + tmp2)
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPDPWSSDS" form="zmm, zmm, zmm" xed="VPDPWSSDS_ZMMi32_MASKmskw_ZMMi16_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_dpwssds_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512_VNNI</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="SI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="src" etype="SI32"/>
+ <parameter type="__m256i" varname="a" etype="SI16"/>
+ <parameter type="__m256i" varname="b" etype="SI16"/>
+ <description>Multiply groups of 2 adjacent pairs of signed 16-bit integers in "a" with corresponding 16-bit integers in "b", producing 2 intermediate signed 32-bit results. Sum these 2 results with the corresponding 32-bit integer in "src" using signed saturation, and store the packed 32-bit results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ IF k[j]
+ tmp1.dword := SignExtend32(a.word[2*j]) * SignExtend32(b.word[2*j])
+ tmp2.dword := SignExtend32(a.word[2*j+1]) * SignExtend32(b.word[2*j+1])
+ dst.dword[j] := Saturate32(src.dword[j] + tmp1 + tmp2)
+ ELSE
+ dst.dword[j] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPDPWSSDS" form="ymm {z}, ymm, ymm" xed="VPDPWSSDS_YMMi32_MASKmskw_YMMi16_YMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_dpwssds_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512_VNNI</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="SI32"/>
+ <parameter type="__m256i" varname="src" etype="SI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI16"/>
+ <parameter type="__m256i" varname="b" etype="SI16"/>
+ <description>Multiply groups of 2 adjacent pairs of signed 16-bit integers in "a" with corresponding 16-bit integers in "b", producing 2 intermediate signed 32-bit results. Sum these 2 results with the corresponding 32-bit integer in "src" using signed saturation, and store the packed 32-bit results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ IF k[j]
+ tmp1.dword := SignExtend32(a.word[2*j]) * SignExtend32(b.word[2*j])
+ tmp2.dword := SignExtend32(a.word[2*j+1]) * SignExtend32(b.word[2*j+1])
+ dst.dword[j] := Saturate32(src.dword[j] + tmp1 + tmp2)
+ ELSE
+ dst.dword[j] := src.dword[j]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPDPWSSDS" form="ymm {k}, ymm, ymm" xed="VPDPWSSDS_YMMi32_MASKmskw_YMMi16_YMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_dpwssds_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512_VNNI</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="SI32"/>
+ <parameter type="__m256i" varname="src" etype="SI32"/>
+ <parameter type="__m256i" varname="a" etype="SI16"/>
+ <parameter type="__m256i" varname="b" etype="SI16"/>
+ <description>Multiply groups of 2 adjacent pairs of signed 16-bit integers in "a" with corresponding 16-bit integers in "b", producing 2 intermediate signed 32-bit results. Sum these 2 results with the corresponding 32-bit integer in "src" using signed saturation, and store the packed 32-bit results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ tmp1.dword := SignExtend32(a.word[2*j]) * SignExtend32(b.word[2*j])
+ tmp2.dword := SignExtend32(a.word[2*j+1]) * SignExtend32(b.word[2*j+1])
+ dst.dword[j] := Saturate32(src.dword[j] + tmp1 + tmp2)
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPDPWSSDS" form="ymm, ymm, ymm" xed="VPDPWSSDS_YMMi32_MASKmskw_YMMi16_YMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_dpwssds_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512_VNNI</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="SI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="src" etype="SI32"/>
+ <parameter type="__m128i" varname="a" etype="SI16"/>
+ <parameter type="__m128i" varname="b" etype="SI16"/>
+ <description>Multiply groups of 2 adjacent pairs of signed 16-bit integers in "a" with corresponding 16-bit integers in "b", producing 2 intermediate signed 32-bit results. Sum these 2 results with the corresponding 32-bit integer in "src" using signed saturation, and store the packed 32-bit results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ IF k[j]
+ tmp1.dword := SignExtend32(a.word[2*j]) * SignExtend32(b.word[2*j])
+ tmp2.dword := SignExtend32(a.word[2*j+1]) * SignExtend32(b.word[2*j+1])
+ dst.dword[j] := Saturate32(src.dword[j] + tmp1 + tmp2)
+ ELSE
+ dst.dword[j] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPDPWSSDS" form="xmm {z}, xmm, xmm" xed="VPDPWSSDS_XMMi32_MASKmskw_XMMi16_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_dpwssds_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512_VNNI</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="SI32"/>
+ <parameter type="__m128i" varname="src" etype="SI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI16"/>
+ <parameter type="__m128i" varname="b" etype="SI16"/>
+ <description>Multiply groups of 2 adjacent pairs of signed 16-bit integers in "a" with corresponding 16-bit integers in "b", producing 2 intermediate signed 32-bit results. Sum these 2 results with the corresponding 32-bit integer in "src" using signed saturation, and store the packed 32-bit results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ IF k[j]
+ tmp1.dword := SignExtend32(a.word[2*j]) * SignExtend32(b.word[2*j])
+ tmp2.dword := SignExtend32(a.word[2*j+1]) * SignExtend32(b.word[2*j+1])
+ dst.dword[j] := Saturate32(src.dword[j] + tmp1 + tmp2)
+ ELSE
+ dst.dword[j] := src.dword[j]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPDPWSSDS" form="xmm {k}, xmm, xmm" xed="VPDPWSSDS_XMMi32_MASKmskw_XMMi16_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_dpwssds_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512_VNNI</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="SI32"/>
+ <parameter type="__m128i" varname="src" etype="SI32"/>
+ <parameter type="__m128i" varname="a" etype="SI16"/>
+ <parameter type="__m128i" varname="b" etype="SI16"/>
+ <description>Multiply groups of 2 adjacent pairs of signed 16-bit integers in "a" with corresponding 16-bit integers in "b", producing 2 intermediate signed 32-bit results. Sum these 2 results with the corresponding 32-bit integer in "src" using signed saturation, and store the packed 32-bit results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ tmp1.dword := SignExtend32(a.word[2*j]) * SignExtend32(b.word[2*j])
+ tmp2.dword := SignExtend32(a.word[2*j+1]) * SignExtend32(b.word[2*j+1])
+ dst.dword[j] := Saturate32(src.dword[j] + tmp1 + tmp2)
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPDPWSSDS" form="xmm, xmm, xmm" xed="VPDPWSSDS_XMMi32_MASKmskw_XMMi16_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_dpwssd_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512_VNNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="SI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="src" etype="SI32"/>
+ <parameter type="__m512i" varname="a" etype="SI16"/>
+ <parameter type="__m512i" varname="b" etype="SI16"/>
+ <description>Multiply groups of 2 adjacent pairs of signed 16-bit integers in "a" with corresponding 16-bit integers in "b", producing 2 intermediate signed 32-bit results. Sum these 2 results with the corresponding 32-bit integer in "src", and store the packed 32-bit results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ IF k[j]
+ tmp1.dword := SignExtend32(a.word[2*j]) * SignExtend32(b.word[2*j])
+ tmp2.dword := SignExtend32(a.word[2*j+1]) * SignExtend32(b.word[2*j+1])
+ dst.dword[j] := src.dword[j] + tmp1 + tmp2
+ ELSE
+ dst.dword[j] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPDPWSSD" form="zmm {z}, zmm, zmm" xed="VPDPWSSD_ZMMi32_MASKmskw_ZMMi16_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_dpwssd_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512_VNNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="SI32"/>
+ <parameter type="__m512i" varname="src" etype="SI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI16"/>
+ <parameter type="__m512i" varname="b" etype="SI16"/>
+ <description>Multiply groups of 2 adjacent pairs of signed 16-bit integers in "a" with corresponding 16-bit integers in "b", producing 2 intermediate signed 32-bit results. Sum these 2 results with the corresponding 32-bit integer in "src", and store the packed 32-bit results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ IF k[j]
+ tmp1.dword := SignExtend32(a.word[2*j]) * SignExtend32(b.word[2*j])
+ tmp2.dword := SignExtend32(a.word[2*j+1]) * SignExtend32(b.word[2*j+1])
+ dst.dword[j] := src.dword[j] + tmp1 + tmp2
+ ELSE
+ dst.dword[j] := src.dword[j]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPDPWSSD" form="zmm {k}, zmm, zmm" xed="VPDPWSSD_ZMMi32_MASKmskw_ZMMi16_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_dpwssd_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512_VNNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="SI32"/>
+ <parameter type="__m512i" varname="src" etype="SI32"/>
+ <parameter type="__m512i" varname="a" etype="SI16"/>
+ <parameter type="__m512i" varname="b" etype="SI16"/>
+ <description>Multiply groups of 2 adjacent pairs of signed 16-bit integers in "a" with corresponding 16-bit integers in "b", producing 2 intermediate signed 32-bit results. Sum these 2 results with the corresponding 32-bit integer in "src", and store the packed 32-bit results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ tmp1.dword := SignExtend32(a.word[2*j]) * SignExtend32(b.word[2*j])
+ tmp2.dword := SignExtend32(a.word[2*j+1]) * SignExtend32(b.word[2*j+1])
+ dst.dword[j] := src.dword[j] + tmp1 + tmp2
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPDPWSSD" form="zmm, zmm, zmm" xed="VPDPWSSD_ZMMi32_MASKmskw_ZMMi16_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_dpwssd_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512_VNNI</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="SI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="src" etype="SI32"/>
+ <parameter type="__m256i" varname="a" etype="SI16"/>
+ <parameter type="__m256i" varname="b" etype="SI16"/>
+ <description>Multiply groups of 2 adjacent pairs of signed 16-bit integers in "a" with corresponding 16-bit integers in "b", producing 2 intermediate signed 32-bit results. Sum these 2 results with the corresponding 32-bit integer in "src", and store the packed 32-bit results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ IF k[j]
+ tmp1.dword := SignExtend32(a.word[2*j]) * SignExtend32(b.word[2*j])
+ tmp2.dword := SignExtend32(a.word[2*j+1]) * SignExtend32(b.word[2*j+1])
+ dst.dword[j] := src.dword[j] + tmp1 + tmp2
+ ELSE
+ dst.dword[j] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPDPWSSD" form="ymm {z}, ymm, ymm" xed="VPDPWSSD_YMMi32_MASKmskw_YMMi16_YMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_dpwssd_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512_VNNI</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="SI32"/>
+ <parameter type="__m256i" varname="src" etype="SI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="SI16"/>
+ <parameter type="__m256i" varname="b" etype="SI16"/>
+ <description>Multiply groups of 2 adjacent pairs of signed 16-bit integers in "a" with corresponding 16-bit integers in "b", producing 2 intermediate signed 32-bit results. Sum these 2 results with the corresponding 32-bit integer in "src", and store the packed 32-bit results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ IF k[j]
+ tmp1.dword := SignExtend32(a.word[2*j]) * SignExtend32(b.word[2*j])
+ tmp2.dword := SignExtend32(a.word[2*j+1]) * SignExtend32(b.word[2*j+1])
+ dst.dword[j] := src.dword[j] + tmp1 + tmp2
+ ELSE
+ dst.dword[j] := src.dword[j]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPDPWSSD" form="ymm {k}, ymm, ymm" xed="VPDPWSSD_YMMi32_MASKmskw_YMMi16_YMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_dpwssd_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512_VNNI</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="SI32"/>
+ <parameter type="__m256i" varname="src" etype="SI32"/>
+ <parameter type="__m256i" varname="a" etype="SI16"/>
+ <parameter type="__m256i" varname="b" etype="SI16"/>
+ <description>Multiply groups of 2 adjacent pairs of signed 16-bit integers in "a" with corresponding 16-bit integers in "b", producing 2 intermediate signed 32-bit results. Sum these 2 results with the corresponding 32-bit integer in "src", and store the packed 32-bit results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ tmp1.dword := SignExtend32(a.word[2*j]) * SignExtend32(b.word[2*j])
+ tmp2.dword := SignExtend32(a.word[2*j+1]) * SignExtend32(b.word[2*j+1])
+ dst.dword[j] := src.dword[j] + tmp1 + tmp2
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPDPWSSD" form="ymm, ymm, ymm" xed="VPDPWSSD_YMMi32_MASKmskw_YMMi16_YMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_dpwssd_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512_VNNI</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="SI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="src" etype="SI32"/>
+ <parameter type="__m128i" varname="a" etype="SI16"/>
+ <parameter type="__m128i" varname="b" etype="SI16"/>
+ <description>Multiply groups of 2 adjacent pairs of signed 16-bit integers in "a" with corresponding 16-bit integers in "b", producing 2 intermediate signed 32-bit results. Sum these 2 results with the corresponding 32-bit integer in "src", and store the packed 32-bit results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ IF k[j]
+ tmp1.dword := SignExtend32(a.word[2*j]) * SignExtend32(b.word[2*j])
+ tmp2.dword := SignExtend32(a.word[2*j+1]) * SignExtend32(b.word[2*j+1])
+ dst.dword[j] := src.dword[j] + tmp1 + tmp2
+ ELSE
+ dst.dword[j] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPDPWSSD" form="xmm {z}, xmm, xmm" xed="VPDPWSSD_XMMi32_MASKmskw_XMMi16_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_dpwssd_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512_VNNI</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="SI32"/>
+ <parameter type="__m128i" varname="src" etype="SI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="SI16"/>
+ <parameter type="__m128i" varname="b" etype="SI16"/>
+ <description>Multiply groups of 2 adjacent pairs of signed 16-bit integers in "a" with corresponding 16-bit integers in "b", producing 2 intermediate signed 32-bit results. Sum these 2 results with the corresponding 32-bit integer in "src", and store the packed 32-bit results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ IF k[j]
+ tmp1.dword := SignExtend32(a.word[2*j]) * SignExtend32(b.word[2*j])
+ tmp2.dword := SignExtend32(a.word[2*j+1]) * SignExtend32(b.word[2*j+1])
+ dst.dword[j] := src.dword[j] + tmp1 + tmp2
+ ELSE
+ dst.dword[j] := src.dword[j]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPDPWSSD" form="xmm {k}, xmm, xmm" xed="VPDPWSSD_XMMi32_MASKmskw_XMMi16_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_dpwssd_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512_VNNI</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="SI32"/>
+ <parameter type="__m128i" varname="src" etype="SI32"/>
+ <parameter type="__m128i" varname="a" etype="SI16"/>
+ <parameter type="__m128i" varname="b" etype="SI16"/>
+ <description>Multiply groups of 2 adjacent pairs of signed 16-bit integers in "a" with corresponding 16-bit integers in "b", producing 2 intermediate signed 32-bit results. Sum these 2 results with the corresponding 32-bit integer in "src", and store the packed 32-bit results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ tmp1.dword := SignExtend32(a.word[2*j]) * SignExtend32(b.word[2*j])
+ tmp2.dword := SignExtend32(a.word[2*j+1]) * SignExtend32(b.word[2*j+1])
+ dst.dword[j] := src.dword[j] + tmp1 + tmp2
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPDPWSSD" form="xmm, xmm, xmm" xed="VPDPWSSD_XMMi32_MASKmskw_XMMi16_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_dpbusds_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512_VNNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="SI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="src" etype="SI32"/>
+ <parameter type="__m512i" varname="a" etype="UI8"/>
+ <parameter type="__m512i" varname="b" etype="SI8"/>
+ <description>Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in "a" with corresponding signed 8-bit integers in "b", producing 4 intermediate signed 16-bit results. Sum these 4 results with the corresponding 32-bit integer in "src" using signed saturation, and store the packed 32-bit results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ IF k[j]
+ tmp1.word := Signed(ZeroExtend16(a.byte[4*j]) * SignExtend16(b.byte[4*j]))
+ tmp2.word := Signed(ZeroExtend16(a.byte[4*j+1]) * SignExtend16(b.byte[4*j+1]))
+ tmp3.word := Signed(ZeroExtend16(a.byte[4*j+2]) * SignExtend16(b.byte[4*j+2]))
+ tmp4.word := Signed(ZeroExtend16(a.byte[4*j+3]) * SignExtend16(b.byte[4*j+3]))
+ dst.dword[j] := Saturate32(src.dword[j] + tmp1 + tmp2 + tmp3 + tmp4)
+ ELSE
+ dst.dword[j] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPDPBUSDS" form="zmm {z}, zmm, zmm" xed="VPDPBUSDS_ZMMi32_MASKmskw_ZMMu8_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_dpbusds_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512_VNNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="SI32"/>
+ <parameter type="__m512i" varname="src" etype="SI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI8"/>
+ <parameter type="__m512i" varname="b" etype="SI8"/>
+ <description>Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in "a" with corresponding signed 8-bit integers in "b", producing 4 intermediate signed 16-bit results. Sum these 4 results with the corresponding 32-bit integer in "src" using signed saturation, and store the packed 32-bit results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ IF k[j]
+ tmp1.word := Signed(ZeroExtend16(a.byte[4*j]) * SignExtend16(b.byte[4*j]))
+ tmp2.word := Signed(ZeroExtend16(a.byte[4*j+1]) * SignExtend16(b.byte[4*j+1]))
+ tmp3.word := Signed(ZeroExtend16(a.byte[4*j+2]) * SignExtend16(b.byte[4*j+2]))
+ tmp4.word := Signed(ZeroExtend16(a.byte[4*j+3]) * SignExtend16(b.byte[4*j+3]))
+ dst.dword[j] := Saturate32(src.dword[j] + tmp1 + tmp2 + tmp3 + tmp4)
+ ELSE
+ dst.dword[j] := src.dword[j]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPDPBUSDS" form="zmm {k}, zmm, zmm" xed="VPDPBUSDS_ZMMi32_MASKmskw_ZMMu8_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_dpbusds_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512_VNNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="SI32"/>
+ <parameter type="__m512i" varname="src" etype="SI32"/>
+ <parameter type="__m512i" varname="a" etype="UI8"/>
+ <parameter type="__m512i" varname="b" etype="SI8"/>
+ <description>Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in "a" with corresponding signed 8-bit integers in "b", producing 4 intermediate signed 16-bit results. Sum these 4 results with the corresponding 32-bit integer in "src" using signed saturation, and store the packed 32-bit results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ tmp1.word := Signed(ZeroExtend16(a.byte[4*j]) * SignExtend16(b.byte[4*j]))
+ tmp2.word := Signed(ZeroExtend16(a.byte[4*j+1]) * SignExtend16(b.byte[4*j+1]))
+ tmp3.word := Signed(ZeroExtend16(a.byte[4*j+2]) * SignExtend16(b.byte[4*j+2]))
+ tmp4.word := Signed(ZeroExtend16(a.byte[4*j+3]) * SignExtend16(b.byte[4*j+3]))
+ dst.dword[j] := Saturate32(src.dword[j] + tmp1 + tmp2 + tmp3 + tmp4)
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPDPBUSDS" form="zmm, zmm, zmm" xed="VPDPBUSDS_ZMMi32_MASKmskw_ZMMu8_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_dpbusds_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512_VNNI</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="SI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="src" etype="SI32"/>
+ <parameter type="__m256i" varname="a" etype="UI8"/>
+ <parameter type="__m256i" varname="b" etype="SI8"/>
+ <description>Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in "a" with corresponding signed 8-bit integers in "b", producing 4 intermediate signed 16-bit results. Sum these 4 results with the corresponding 32-bit integer in "src" using signed saturation, and store the packed 32-bit results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ IF k[j]
+ tmp1.word := Signed(ZeroExtend16(a.byte[4*j]) * SignExtend16(b.byte[4*j]))
+ tmp2.word := Signed(ZeroExtend16(a.byte[4*j+1]) * SignExtend16(b.byte[4*j+1]))
+ tmp3.word := Signed(ZeroExtend16(a.byte[4*j+2]) * SignExtend16(b.byte[4*j+2]))
+ tmp4.word := Signed(ZeroExtend16(a.byte[4*j+3]) * SignExtend16(b.byte[4*j+3]))
+ dst.dword[j] := Saturate32(src.dword[j] + tmp1 + tmp2 + tmp3 + tmp4)
+ ELSE
+ dst.dword[j] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPDPBUSDS" form="ymm {z}, ymm, ymm" xed="VPDPBUSDS_YMMi32_MASKmskw_YMMu8_YMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_dpbusds_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512_VNNI</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="SI32"/>
+ <parameter type="__m256i" varname="src" etype="SI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI8"/>
+ <parameter type="__m256i" varname="b" etype="SI8"/>
+ <description>Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in "a" with corresponding signed 8-bit integers in "b", producing 4 intermediate signed 16-bit results. Sum these 4 results with the corresponding 32-bit integer in "src" using signed saturation, and store the packed 32-bit results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ IF k[j]
+ tmp1.word := Signed(ZeroExtend16(a.byte[4*j]) * SignExtend16(b.byte[4*j]))
+ tmp2.word := Signed(ZeroExtend16(a.byte[4*j+1]) * SignExtend16(b.byte[4*j+1]))
+ tmp3.word := Signed(ZeroExtend16(a.byte[4*j+2]) * SignExtend16(b.byte[4*j+2]))
+ tmp4.word := Signed(ZeroExtend16(a.byte[4*j+3]) * SignExtend16(b.byte[4*j+3]))
+ dst.dword[j] := Saturate32(src.dword[j] + tmp1 + tmp2 + tmp3 + tmp4)
+ ELSE
+ dst.dword[j] := src.dword[j]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPDPBUSDS" form="ymm {k}, ymm, ymm" xed="VPDPBUSDS_YMMi32_MASKmskw_YMMu8_YMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_dpbusds_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512_VNNI</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="SI32"/>
+ <parameter type="__m256i" varname="src" etype="SI32"/>
+ <parameter type="__m256i" varname="a" etype="UI8"/>
+ <parameter type="__m256i" varname="b" etype="SI8"/>
+ <description>Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in "a" with corresponding signed 8-bit integers in "b", producing 4 intermediate signed 16-bit results. Sum these 4 results with the corresponding 32-bit integer in "src" using signed saturation, and store the packed 32-bit results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ tmp1.word := Signed(ZeroExtend16(a.byte[4*j]) * SignExtend16(b.byte[4*j]))
+ tmp2.word := Signed(ZeroExtend16(a.byte[4*j+1]) * SignExtend16(b.byte[4*j+1]))
+ tmp3.word := Signed(ZeroExtend16(a.byte[4*j+2]) * SignExtend16(b.byte[4*j+2]))
+ tmp4.word := Signed(ZeroExtend16(a.byte[4*j+3]) * SignExtend16(b.byte[4*j+3]))
+ dst.dword[j] := Saturate32(src.dword[j] + tmp1 + tmp2 + tmp3 + tmp4)
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPDPBUSDS" form="ymm, ymm, ymm" xed="VPDPBUSDS_YMMi32_MASKmskw_YMMu8_YMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_dpbusds_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512_VNNI</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="SI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="src" etype="SI32"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
+ <parameter type="__m128i" varname="b" etype="SI8"/>
+ <description>Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in "a" with corresponding signed 8-bit integers in "b", producing 4 intermediate signed 16-bit results. Sum these 4 results with the corresponding 32-bit integer in "src" using signed saturation, and store the packed 32-bit results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ IF k[j]
+ tmp1.word := Signed(ZeroExtend16(a.byte[4*j]) * SignExtend16(b.byte[4*j]))
+ tmp2.word := Signed(ZeroExtend16(a.byte[4*j+1]) * SignExtend16(b.byte[4*j+1]))
+ tmp3.word := Signed(ZeroExtend16(a.byte[4*j+2]) * SignExtend16(b.byte[4*j+2]))
+ tmp4.word := Signed(ZeroExtend16(a.byte[4*j+3]) * SignExtend16(b.byte[4*j+3]))
+ dst.dword[j] := Saturate32(src.dword[j] + tmp1 + tmp2 + tmp3 + tmp4)
+ ELSE
+ dst.dword[j] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPDPBUSDS" form="xmm {z}, xmm, xmm" xed="VPDPBUSDS_XMMi32_MASKmskw_XMMu8_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_dpbusds_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512_VNNI</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="SI32"/>
+ <parameter type="__m128i" varname="src" etype="SI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
+ <parameter type="__m128i" varname="b" etype="SI8"/>
+ <description>Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in "a" with corresponding signed 8-bit integers in "b", producing 4 intermediate signed 16-bit results. Sum these 4 results with the corresponding 32-bit integer in "src" using signed saturation, and store the packed 32-bit results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ IF k[j]
+ tmp1.word := Signed(ZeroExtend16(a.byte[4*j]) * SignExtend16(b.byte[4*j]))
+ tmp2.word := Signed(ZeroExtend16(a.byte[4*j+1]) * SignExtend16(b.byte[4*j+1]))
+ tmp3.word := Signed(ZeroExtend16(a.byte[4*j+2]) * SignExtend16(b.byte[4*j+2]))
+ tmp4.word := Signed(ZeroExtend16(a.byte[4*j+3]) * SignExtend16(b.byte[4*j+3]))
+ dst.dword[j] := Saturate32(src.dword[j] + tmp1 + tmp2 + tmp3 + tmp4)
+ ELSE
+ dst.dword[j] := src.dword[j]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPDPBUSDS" form="xmm {k}, xmm, xmm" xed="VPDPBUSDS_XMMi32_MASKmskw_XMMu8_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_dpbusds_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512_VNNI</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="SI32"/>
+ <parameter type="__m128i" varname="src" etype="SI32"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
+ <parameter type="__m128i" varname="b" etype="SI8"/>
+ <description>Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in "a" with corresponding signed 8-bit integers in "b", producing 4 intermediate signed 16-bit results. Sum these 4 results with the corresponding 32-bit integer in "src" using signed saturation, and store the packed 32-bit results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ tmp1.word := Signed(ZeroExtend16(a.byte[4*j]) * SignExtend16(b.byte[4*j]))
+ tmp2.word := Signed(ZeroExtend16(a.byte[4*j+1]) * SignExtend16(b.byte[4*j+1]))
+ tmp3.word := Signed(ZeroExtend16(a.byte[4*j+2]) * SignExtend16(b.byte[4*j+2]))
+ tmp4.word := Signed(ZeroExtend16(a.byte[4*j+3]) * SignExtend16(b.byte[4*j+3]))
+ dst.dword[j] := Saturate32(src.dword[j] + tmp1 + tmp2 + tmp3 + tmp4)
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPDPBUSDS" form="xmm, xmm, xmm" xed="VPDPBUSDS_XMMi32_MASKmskw_XMMu8_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_maskz_dpbusd_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512_VNNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="SI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="src" etype="SI32"/>
+ <parameter type="__m512i" varname="a" etype="UI8"/>
+ <parameter type="__m512i" varname="b" etype="SI8"/>
+ <description>Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in "a" with corresponding signed 8-bit integers in "b", producing 4 intermediate signed 16-bit results. Sum these 4 results with the corresponding 32-bit integer in "src", and store the packed 32-bit results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ IF k[j]
+ tmp1.word := Signed(ZeroExtend16(a.byte[4*j]) * SignExtend16(b.byte[4*j]))
+ tmp2.word := Signed(ZeroExtend16(a.byte[4*j+1]) * SignExtend16(b.byte[4*j+1]))
+ tmp3.word := Signed(ZeroExtend16(a.byte[4*j+2]) * SignExtend16(b.byte[4*j+2]))
+ tmp4.word := Signed(ZeroExtend16(a.byte[4*j+3]) * SignExtend16(b.byte[4*j+3]))
+ dst.dword[j] := src.dword[j] + tmp1 + tmp2 + tmp3 + tmp4
+ ELSE
+ dst.dword[j] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPDPBUSD" form="zmm {z}, zmm, zmm" xed="VPDPBUSD_ZMMi32_MASKmskw_ZMMu8_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_mask_dpbusd_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512_VNNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="SI32"/>
+ <parameter type="__m512i" varname="src" etype="SI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI8"/>
+ <parameter type="__m512i" varname="b" etype="SI8"/>
+ <description>Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in "a" with corresponding signed 8-bit integers in "b", producing 4 intermediate signed 16-bit results. Sum these 4 results with the corresponding 32-bit integer in "src", and store the packed 32-bit results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ IF k[j]
+ tmp1.word := Signed(ZeroExtend16(a.byte[4*j]) * SignExtend16(b.byte[4*j]))
+ tmp2.word := Signed(ZeroExtend16(a.byte[4*j+1]) * SignExtend16(b.byte[4*j+1]))
+ tmp3.word := Signed(ZeroExtend16(a.byte[4*j+2]) * SignExtend16(b.byte[4*j+2]))
+ tmp4.word := Signed(ZeroExtend16(a.byte[4*j+3]) * SignExtend16(b.byte[4*j+3]))
+ dst.dword[j] := src.dword[j] + tmp1 + tmp2 + tmp3 + tmp4
+ ELSE
+ dst.dword[j] := src.dword[j]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPDPBUSD" form="zmm {k}, zmm, zmm" xed="VPDPBUSD_ZMMi32_MASKmskw_ZMMu8_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_dpbusd_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512_VNNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="SI32"/>
+ <parameter type="__m512i" varname="src" etype="SI32"/>
+ <parameter type="__m512i" varname="a" etype="UI8"/>
+ <parameter type="__m512i" varname="b" etype="SI8"/>
+ <description>Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in "a" with corresponding signed 8-bit integers in "b", producing 4 intermediate signed 16-bit results. Sum these 4 results with the corresponding 32-bit integer in "src", and store the packed 32-bit results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ tmp1.word := Signed(ZeroExtend16(a.byte[4*j]) * SignExtend16(b.byte[4*j]))
+ tmp2.word := Signed(ZeroExtend16(a.byte[4*j+1]) * SignExtend16(b.byte[4*j+1]))
+ tmp3.word := Signed(ZeroExtend16(a.byte[4*j+2]) * SignExtend16(b.byte[4*j+2]))
+ tmp4.word := Signed(ZeroExtend16(a.byte[4*j+3]) * SignExtend16(b.byte[4*j+3]))
+ dst.dword[j] := src.dword[j] + tmp1 + tmp2 + tmp3 + tmp4
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPDPBUSD" form="zmm, zmm, zmm" xed="VPDPBUSD_ZMMi32_MASKmskw_ZMMu8_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_maskz_dpbusd_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512_VNNI</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="SI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="src" etype="SI32"/>
+ <parameter type="__m256i" varname="a" etype="UI8"/>
+ <parameter type="__m256i" varname="b" etype="SI8"/>
+ <description>Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in "a" with corresponding signed 8-bit integers in "b", producing 4 intermediate signed 16-bit results. Sum these 4 results with the corresponding 32-bit integer in "src", and store the packed 32-bit results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ IF k[j]
+ tmp1.word := Signed(ZeroExtend16(a.byte[4*j]) * SignExtend16(b.byte[4*j]))
+ tmp2.word := Signed(ZeroExtend16(a.byte[4*j+1]) * SignExtend16(b.byte[4*j+1]))
+ tmp3.word := Signed(ZeroExtend16(a.byte[4*j+2]) * SignExtend16(b.byte[4*j+2]))
+ tmp4.word := Signed(ZeroExtend16(a.byte[4*j+3]) * SignExtend16(b.byte[4*j+3]))
+ dst.dword[j] := src.dword[j] + tmp1 + tmp2 + tmp3 + tmp4
+ ELSE
+ dst.dword[j] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPDPBUSD" form="ymm {z}, ymm, ymm" xed="VPDPBUSD_YMMi32_MASKmskw_YMMu8_YMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_mask_dpbusd_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512_VNNI</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="SI32"/>
+ <parameter type="__m256i" varname="src" etype="SI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI8"/>
+ <parameter type="__m256i" varname="b" etype="SI8"/>
+ <description>Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in "a" with corresponding signed 8-bit integers in "b", producing 4 intermediate signed 16-bit results. Sum these 4 results with the corresponding 32-bit integer in "src", and store the packed 32-bit results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ IF k[j]
+ tmp1.word := Signed(ZeroExtend16(a.byte[4*j]) * SignExtend16(b.byte[4*j]))
+ tmp2.word := Signed(ZeroExtend16(a.byte[4*j+1]) * SignExtend16(b.byte[4*j+1]))
+ tmp3.word := Signed(ZeroExtend16(a.byte[4*j+2]) * SignExtend16(b.byte[4*j+2]))
+ tmp4.word := Signed(ZeroExtend16(a.byte[4*j+3]) * SignExtend16(b.byte[4*j+3]))
+ dst.dword[j] := src.dword[j] + tmp1 + tmp2 + tmp3 + tmp4
+ ELSE
+ dst.dword[j] := src.dword[j]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPDPBUSD" form="ymm {k}, ymm, ymm" xed="VPDPBUSD_YMMi32_MASKmskw_YMMu8_YMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_dpbusd_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512_VNNI</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="SI32"/>
+ <parameter type="__m256i" varname="src" etype="SI32"/>
+ <parameter type="__m256i" varname="a" etype="UI8"/>
+ <parameter type="__m256i" varname="b" etype="SI8"/>
+ <description>Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in "a" with corresponding signed 8-bit integers in "b", producing 4 intermediate signed 16-bit results. Sum these 4 results with the corresponding 32-bit integer in "src", and store the packed 32-bit results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ tmp1.word := Signed(ZeroExtend16(a.byte[4*j]) * SignExtend16(b.byte[4*j]))
+ tmp2.word := Signed(ZeroExtend16(a.byte[4*j+1]) * SignExtend16(b.byte[4*j+1]))
+ tmp3.word := Signed(ZeroExtend16(a.byte[4*j+2]) * SignExtend16(b.byte[4*j+2]))
+ tmp4.word := Signed(ZeroExtend16(a.byte[4*j+3]) * SignExtend16(b.byte[4*j+3]))
+ dst.dword[j] := src.dword[j] + tmp1 + tmp2 + tmp3 + tmp4
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPDPBUSD" form="ymm, ymm, ymm" xed="VPDPBUSD_YMMi32_MASKmskw_YMMu8_YMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_maskz_dpbusd_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512_VNNI</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="SI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="src" etype="SI32"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
+ <parameter type="__m128i" varname="b" etype="SI8"/>
+ <description>Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in "a" with corresponding signed 8-bit integers in "b", producing 4 intermediate signed 16-bit results. Sum these 4 results with the corresponding 32-bit integer in "src", and store the packed 32-bit results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ IF k[j]
+ tmp1.word := Signed(ZeroExtend16(a.byte[4*j]) * SignExtend16(b.byte[4*j]))
+ tmp2.word := Signed(ZeroExtend16(a.byte[4*j+1]) * SignExtend16(b.byte[4*j+1]))
+ tmp3.word := Signed(ZeroExtend16(a.byte[4*j+2]) * SignExtend16(b.byte[4*j+2]))
+ tmp4.word := Signed(ZeroExtend16(a.byte[4*j+3]) * SignExtend16(b.byte[4*j+3]))
+ dst.dword[j] := src.dword[j] + tmp1 + tmp2 + tmp3 + tmp4
+ ELSE
+ dst.dword[j] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPDPBUSD" form="xmm {z}, xmm, xmm" xed="VPDPBUSD_XMMi32_MASKmskw_XMMu8_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_mask_dpbusd_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512_VNNI</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="SI32"/>
+ <parameter type="__m128i" varname="src" etype="SI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
+ <parameter type="__m128i" varname="b" etype="SI8"/>
+ <description>Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in "a" with corresponding signed 8-bit integers in "b", producing 4 intermediate signed 16-bit results. Sum these 4 results with the corresponding 32-bit integer in "src", and store the packed 32-bit results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 3
+ IF k[j]
+ tmp1.word := Signed(ZeroExtend16(a.byte[4*j]) * SignExtend16(b.byte[4*j]))
+ tmp2.word := Signed(ZeroExtend16(a.byte[4*j+1]) * SignExtend16(b.byte[4*j+1]))
+ tmp3.word := Signed(ZeroExtend16(a.byte[4*j+2]) * SignExtend16(b.byte[4*j+2]))
+ tmp4.word := Signed(ZeroExtend16(a.byte[4*j+3]) * SignExtend16(b.byte[4*j+3]))
+ dst.dword[j] := src.dword[j] + tmp1 + tmp2 + tmp3 + tmp4
+ ELSE
+ dst.dword[j] := src.dword[j]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPDPBUSD" form="xmm {k}, xmm, xmm" xed="VPDPBUSD_XMMi32_MASKmskw_XMMu8_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_dpbusd_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512_VNNI</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="SI32"/>
+ <parameter type="__m128i" varname="src" etype="SI32"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
+ <parameter type="__m128i" varname="b" etype="SI8"/>
+ <description>Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in "a" with corresponding signed 8-bit integers in "b", producing 4 intermediate signed 16-bit results. Sum these 4 results with the corresponding 32-bit integer in "src", and store the packed 32-bit results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ tmp1.word := Signed(ZeroExtend16(a.byte[4*j]) * SignExtend16(b.byte[4*j]))
+ tmp2.word := Signed(ZeroExtend16(a.byte[4*j+1]) * SignExtend16(b.byte[4*j+1]))
+ tmp3.word := Signed(ZeroExtend16(a.byte[4*j+2]) * SignExtend16(b.byte[4*j+2]))
+ tmp4.word := Signed(ZeroExtend16(a.byte[4*j+3]) * SignExtend16(b.byte[4*j+3]))
+ dst.dword[j] := src.dword[j] + tmp1 + tmp2 + tmp3 + tmp4
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VPDPBUSD" form="xmm, xmm, xmm" xed="VPDPBUSD_XMMi32_MASKmskw_XMMu8_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_2intersect_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512_VP2INTERSECT</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Mask</category>
+ <return type="void"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="b" etype="UI32"/>
+ <parameter type="__mmask8*" varname="k1" etype="MASK" memwidth="8"/>
+ <parameter type="__mmask8*" varname="k2" etype="MASK" memwidth="8"/>
+ <description>Compute intersection of packed 32-bit integer vectors "a" and "b", and store indication of match in the corresponding bit of two mask registers specified by "k1" and "k2". A match in corresponding elements of "a" and "b" is indicated by a set bit in the corresponding bit of the mask registers.</description>
+ <operation>
+MEM[k1+7:k1] := 0
+MEM[k2+7:k2] := 0
+FOR i := 0 TO 3
+ FOR j := 0 TO 3
+ match := (a.dword[i] == b.dword[j] ? 1 : 0)
+ MEM[k1+7:k1].bit[i] |= match
+ MEM[k2+7:k2].bit[j] |= match
+ ENDFOR
+ENDFOR
+ </operation>
+ <instruction name="VP2INTERSECTD" form="k, xmm, xmm" xed="VP2INTERSECTD_MASKmskw_XMMu32_XMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_2intersect_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512_VP2INTERSECT</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Mask</category>
+ <return type="void"/>
+ <parameter type="__m256i" varname="a" etype="UI32"/>
+ <parameter type="__m256i" varname="b" etype="UI32"/>
+ <parameter type="__mmask8*" varname="k1" etype="MASK" memwidth="8"/>
+ <parameter type="__mmask8*" varname="k2" etype="MASK" memwidth="8"/>
+ <description>Compute intersection of packed 32-bit integer vectors "a" and "b", and store indication of match in the corresponding bit of two mask registers specified by "k1" and "k2". A match in corresponding elements of "a" and "b" is indicated by a set bit in the corresponding bit of the mask registers.</description>
+ <operation>
+MEM[k1+7:k1] := 0
+MEM[k2+7:k2] := 0
+FOR i := 0 TO 7
+ FOR j := 0 TO 7
+ match := (a.dword[i] == b.dword[j] ? 1 : 0)
+ MEM[k1+7:k1].bit[i] |= match
+ MEM[k2+7:k2].bit[j] |= match
+ ENDFOR
+ENDFOR
+ </operation>
+ <instruction name="VP2INTERSECTD" form="k, ymm, ymm" xed="VP2INTERSECTD_MASKmskw_YMMu32_YMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_2intersect_epi32">
+ <type>Integer</type>
+ <CPUID>AVX512_VP2INTERSECT</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Mask</category>
+ <return type="void"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="__m512i" varname="b" etype="UI32"/>
+ <parameter type="__mmask16*" varname="k1" etype="MASK" memwidth="16"/>
+ <parameter type="__mmask16*" varname="k2" etype="MASK" memwidth="16"/>
+ <description>Compute intersection of packed 32-bit integer vectors "a" and "b", and store indication of match in the corresponding bit of two mask registers specified by "k1" and "k2". A match in corresponding elements of "a" and "b" is indicated by a set bit in the corresponding bit of the mask registers.</description>
+ <operation>
+MEM[k1+15:k1] := 0
+MEM[k2+15:k2] := 0
+FOR i := 0 TO 15
+ FOR j := 0 TO 15
+ match := (a.dword[i] == b.dword[j] ? 1 : 0)
+ MEM[k1+15:k1].bit[i] |= match
+ MEM[k2+15:k2].bit[j] |= match
+ ENDFOR
+ENDFOR
+ </operation>
+ <instruction name="VP2INTERSECTD" form="k, zmm, zmm" xed="VP2INTERSECTD_MASKmskw_ZMMu32_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm_2intersect_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512_VP2INTERSECT</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Mask</category>
+ <return type="void"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <parameter type="__m128i" varname="b" etype="UI64"/>
+ <parameter type="__mmask8*" varname="k1" etype="MASK" memwidth="8"/>
+ <parameter type="__mmask8*" varname="k2" etype="MASK" memwidth="8"/>
+ <description>Compute intersection of packed 64-bit integer vectors "a" and "b", and store indication of match in the corresponding bit of two mask registers specified by "k1" and "k2". A match in corresponding elements of "a" and "b" is indicated by a set bit in the corresponding bit of the mask registers.</description>
+ <operation>
+MEM[k1+7:k1] := 0
+MEM[k2+7:k2] := 0
+FOR i := 0 TO 1
+ FOR j := 0 TO 1
+ match := (a.qword[i] == b.qword[j] ? 1 : 0)
+ MEM[k1+7:k1].bit[i] |= match
+ MEM[k2+7:k2].bit[j] |= match
+ ENDFOR
+ENDFOR
+ </operation>
+ <instruction name="VP2INTERSECTQ" form="k, xmm, xmm" xed="VP2INTERSECTQ_MASKmskw_XMMu64_XMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm256_2intersect_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512_VP2INTERSECT</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Mask</category>
+ <return type="void"/>
+ <parameter type="__m256i" varname="a" etype="UI64"/>
+ <parameter type="__m256i" varname="b" etype="UI64"/>
+ <parameter type="__mmask8*" varname="k1" etype="MASK" memwidth="8"/>
+ <parameter type="__mmask8*" varname="k2" etype="MASK" memwidth="8"/>
+ <description>Compute intersection of packed 64-bit integer vectors "a" and "b", and store indication of match in the corresponding bit of two mask registers specified by "k1" and "k2". A match in corresponding elements of "a" and "b" is indicated by a set bit in the corresponding bit of the mask registers.</description>
+ <operation>
+MEM[k1+7:k1] := 0
+MEM[k2+7:k2] := 0
+FOR i := 0 TO 3
+ FOR j := 0 TO 3
+ match := (a.qword[i] == b.qword[j] ? 1 : 0)
+ MEM[k1+7:k1].bit[i] |= match
+ MEM[k2+7:k2].bit[j] |= match
+ ENDFOR
+ENDFOR
+ </operation>
+ <instruction name="VP2INTERSECTQ" form="k, ymm, ymm" xed="VP2INTERSECTQ_MASKmskw_YMMu64_YMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="AVX-512" name="_mm512_2intersect_epi64">
+ <type>Integer</type>
+ <CPUID>AVX512_VP2INTERSECT</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Mask</category>
+ <return type="void"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="__m512i" varname="b" etype="UI64"/>
+ <parameter type="__mmask8*" varname="k1" etype="MASK" memwidth="8"/>
+ <parameter type="__mmask8*" varname="k2" etype="MASK" memwidth="8"/>
+ <description>Compute intersection of packed 64-bit integer vectors "a" and "b", and store indication of match in the corresponding bit of two mask registers specified by "k1" and "k2". A match in corresponding elements of "a" and "b" is indicated by a set bit in the corresponding bit of the mask registers.</description>
+ <operation>
+MEM[k1+7:k1] := 0
+MEM[k2+7:k2] := 0
+FOR i := 0 TO 7
+ FOR j := 0 TO 7
+ match := (a.qword[i] == b.qword[j] ? 1 : 0)
+ MEM[k1+7:k1].bit[i] |= match
+ MEM[k2+7:k2].bit[j] |= match
+ ENDFOR
+ENDFOR
+ </operation>
+ <instruction name="VP2INTERSECTQ" form="k, zmm, zmm" xed="VP2INTERSECTQ_MASKmskw_ZMMu64_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_bextr_u32">
+ <type>Integer</type>
+ <CPUID>BMI1</CPUID>
+ <category>Bit Manipulation</category>
+ <return type="unsigned int" varname="dst" etype="UI32"/>
+ <parameter type="unsigned int" varname="a" etype="UI32"/>
+ <parameter type="unsigned int" varname="start" etype="UI32"/>
+ <parameter type="unsigned int" varname="len" etype="UI32"/>
+ <description>Extract contiguous bits from unsigned 32-bit integer "a", and store the result in "dst". Extract the number of bits specified by "len", starting at the bit specified by "start".</description>
+ <operation>
+tmp[511:0] := a
+dst[31:0] := ZeroExtend32(tmp[(start[7:0] + len[7:0] - 1):start[7:0]])
+ </operation>
+ <instruction name="BEXTR" form="r32, r32, r32" xed="BEXTR_VGPR32d_VGPR32d_VGPR32d"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_bextr2_u32">
+ <type>Integer</type>
+ <CPUID>BMI1</CPUID>
+ <category>Bit Manipulation</category>
+ <return type="unsigned int" varname="dst" etype="UI32"/>
+ <parameter type="unsigned int" varname="a" etype="UI32"/>
+ <parameter type="unsigned int" varname="control" etype="UI32"/>
+ <description>Extract contiguous bits from unsigned 32-bit integer "a", and store the result in "dst". Extract the number of bits specified by bits 15:8 of "control", starting at the bit specified by bits 0:7 of "control".</description>
+ <operation>
+start := control[7:0]
+len := control[15:8]
+tmp[511:0] := a
+dst[31:0] := ZeroExtend32(tmp[(start[7:0] + len[7:0] - 1):start[7:0]])
+ </operation>
+ <instruction name="BEXTR" form="r32, r32, r32" xed="BEXTR_VGPR32d_VGPR32d_VGPR32d"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_bextr_u64">
+ <type>Integer</type>
+ <CPUID>BMI1</CPUID>
+ <category>Bit Manipulation</category>
+ <return type="unsigned __int64" varname="dst" etype="UI64"/>
+ <parameter type="unsigned __int64" varname="a" etype="UI64"/>
+ <parameter type="unsigned int" varname="start" etype="UI32"/>
+ <parameter type="unsigned int" varname="len" etype="UI32"/>
+ <description>Extract contiguous bits from unsigned 64-bit integer "a", and store the result in "dst". Extract the number of bits specified by "len", starting at the bit specified by "start".</description>
+ <operation>
+tmp[511:0] := a
+dst[63:0] := ZeroExtend64(tmp[(start[7:0] + len[7:0] - 1):start[7:0]])
+ </operation>
+ <instruction name="BEXTR" form="r64, r64, r64" xed="BEXTR_VGPR64q_VGPR64q_VGPR64q"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_bextr2_u64">
+ <type>Integer</type>
+ <CPUID>BMI1</CPUID>
+ <category>Bit Manipulation</category>
+ <return type="unsigned __int64" varname="dst" etype="UI64"/>
+ <parameter type="unsigned __int64" varname="a" etype="UI64"/>
+ <parameter type="unsigned __int64" varname="control" etype="UI64"/>
+ <description>Extract contiguous bits from unsigned 64-bit integer "a", and store the result in "dst". Extract the number of bits specified by bits 15:8 of "control", starting at the bit specified by bits 0:7 of "control".</description>
+ <operation>
+start := control[7:0]
+len := control[15:8]
+tmp[511:0] := a
+dst[63:0] := ZeroExtend64(tmp[(start[7:0] + len[7:0] - 1):start[7:0]])
+ </operation>
+ <instruction name="BEXTR" form="r64, r64, r64" xed="BEXTR_VGPR64q_VGPR64q_VGPR64q"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_blsi_u32">
+ <type>Integer</type>
+ <CPUID>BMI1</CPUID>
+ <category>Bit Manipulation</category>
+ <return type="unsigned int" varname="dst" etype="UI32"/>
+ <parameter type="unsigned int" varname="a" etype="UI32"/>
+ <description>Extract the lowest set bit from unsigned 32-bit integer "a" and set the corresponding bit in "dst". All other bits in "dst" are zeroed, and all bits are zeroed if no bits are set in "a".</description>
+ <operation>
+dst := (-a) AND a
+ </operation>
+ <instruction name="BLSI" form="r32, r32" xed="BLSI_VGPR32d_VGPR32d"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_blsi_u64">
+ <type>Integer</type>
+ <CPUID>BMI1</CPUID>
+ <category>Bit Manipulation</category>
+ <return type="unsigned __int64" varname="dst" etype="UI64"/>
+ <parameter type="unsigned __int64" varname="a" etype="UI64"/>
+ <description>Extract the lowest set bit from unsigned 64-bit integer "a" and set the corresponding bit in "dst". All other bits in "dst" are zeroed, and all bits are zeroed if no bits are set in "a".</description>
+ <operation>
+dst := (-a) AND a
+ </operation>
+ <instruction name="BLSI" form="r64, r64" xed="BLSI_VGPR64q_VGPR64q"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_blsmsk_u32">
+ <type>Integer</type>
+ <CPUID>BMI1</CPUID>
+ <category>Bit Manipulation</category>
+ <return type="unsigned int" varname="dst" etype="UI32"/>
+ <parameter type="unsigned int" varname="a" etype="UI32"/>
+ <description>Set all the lower bits of "dst" up to and including the lowest set bit in unsigned 32-bit integer "a".</description>
+ <operation>
+dst := (a - 1) XOR a
+ </operation>
+ <instruction name="BLSMSK" form="r32, r32" xed="BLSMSK_VGPR32d_VGPR32d"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_blsmsk_u64">
+ <type>Integer</type>
+ <CPUID>BMI1</CPUID>
+ <category>Bit Manipulation</category>
+ <return type="unsigned __int64" varname="dst" etype="UI64"/>
+ <parameter type="unsigned __int64" varname="a" etype="UI64"/>
+ <description>Set all the lower bits of "dst" up to and including the lowest set bit in unsigned 64-bit integer "a".</description>
+ <operation>
+dst := (a - 1) XOR a
+ </operation>
+ <instruction name="BLSMSK" form="r64, r64" xed="BLSMSK_VGPR64q_VGPR64q"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_blsr_u32">
+ <type>Integer</type>
+ <CPUID>BMI1</CPUID>
+ <category>Bit Manipulation</category>
+ <return type="unsigned int" varname="dst" etype="UI32"/>
+ <parameter type="unsigned int" varname="a" etype="UI32"/>
+ <description>Copy all bits from unsigned 32-bit integer "a" to "dst", and reset (set to 0) the bit in "dst" that corresponds to the lowest set bit in "a".</description>
+ <operation>
+dst := (a - 1) AND a
+ </operation>
+ <instruction name="BLSR" form="r32, r32" xed="BLSR_VGPR32d_VGPR32d"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_blsr_u64">
+ <type>Integer</type>
+ <CPUID>BMI1</CPUID>
+ <category>Bit Manipulation</category>
+ <return type="unsigned __int64" varname="dst" etype="UI64"/>
+ <parameter type="unsigned __int64" varname="a" etype="UI64"/>
+ <description>Copy all bits from unsigned 64-bit integer "a" to "dst", and reset (set to 0) the bit in "dst" that corresponds to the lowest set bit in "a".</description>
+ <operation>
+dst := (a - 1) AND a
+ </operation>
+ <instruction name="BLSR" form="r64, r64" xed="BLSR_VGPR64q_VGPR64q"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_andn_u32">
+ <type>Integer</type>
+ <CPUID>BMI1</CPUID>
+ <category>Bit Manipulation</category>
+ <return type="unsigned int" varname="dst" etype="UI32"/>
+ <parameter type="unsigned int" varname="a" etype="UI32"/>
+ <parameter type="unsigned int" varname="b" etype="UI32"/>
+ <description>Compute the bitwise NOT of 32-bit integer "a" and then AND with "b", and store the results in "dst".</description>
+ <operation>
+dst[31:0] := ((NOT a[31:0]) AND b[31:0])
+ </operation>
+ <instruction name="ANDN" form="r32, r32, r32" xed="ANDN_VGPR32d_VGPR32d_VGPR32d"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_andn_u64">
+ <type>Integer</type>
+ <CPUID>BMI1</CPUID>
+ <category>Bit Manipulation</category>
+ <return type="unsigned __int64" varname="dst" etype="UI64"/>
+ <parameter type="unsigned __int64" varname="a" etype="UI64"/>
+ <parameter type="unsigned __int64" varname="b" etype="UI64"/>
+ <description>Compute the bitwise NOT of 64-bit integer "a" and then AND with "b", and store the results in "dst".</description>
+ <operation>
+dst[63:0] := ((NOT a[63:0]) AND b[63:0])
+ </operation>
+ <instruction name="ANDN" form="r64, r64, r64" xed="ANDN_VGPR64q_VGPR64q_VGPR64q"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_tzcnt_u32">
+ <type>Integer</type>
+ <CPUID>BMI1</CPUID>
+ <category>Bit Manipulation</category>
+ <return type="unsigned int" varname="dst" etype="UI32"/>
+ <parameter type="unsigned int" varname="a" etype="UI32"/>
+ <description>Count the number of trailing zero bits in unsigned 32-bit integer "a", and return that count in "dst".</description>
+ <operation>
+tmp := 0
+dst := 0
+DO WHILE ((tmp &lt; 32) AND a[tmp] == 0)
+ tmp := tmp + 1
+ dst := dst + 1
+OD
+ </operation>
+ <instruction name="TZCNT" form="r32, r32" xed="TZCNT_GPRv_GPRv"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_tzcnt_u64">
+ <type>Integer</type>
+ <CPUID>BMI1</CPUID>
+ <category>Bit Manipulation</category>
+ <return type="unsigned __int64" varname="dst" etype="UI64"/>
+ <parameter type="unsigned __int64" varname="a" etype="UI64"/>
+ <description>Count the number of trailing zero bits in unsigned 64-bit integer "a", and return that count in "dst".</description>
+ <operation>
+tmp := 0
+dst := 0
+DO WHILE ((tmp &lt; 64) AND a[tmp] == 0)
+ tmp := tmp + 1
+ dst := dst + 1
+OD
+ </operation>
+ <instruction name="TZCNT" form="r64, r64" xed="TZCNT_GPRv_GPRv"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_mm_tzcnt_32">
+ <type>Integer</type>
+ <CPUID>BMI1</CPUID>
+ <category>Bit Manipulation</category>
+ <return type="int" varname="dst" etype="UI32"/>
+ <parameter type="unsigned int" varname="a" etype="UI32"/>
+ <description>Count the number of trailing zero bits in unsigned 32-bit integer "a", and return that count in "dst".</description>
+ <operation>
+tmp := 0
+dst := 0
+DO WHILE ((tmp &lt; 32) AND a[tmp] == 0)
+ tmp := tmp + 1
+ dst := dst + 1
+OD
+ </operation>
+ <instruction name="TZCNT" form="r32, r32" xed="TZCNT_GPRv_GPRv"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_mm_tzcnt_64">
+ <type>Integer</type>
+ <CPUID>BMI1</CPUID>
+ <category>Bit Manipulation</category>
+ <return type="__int64" varname="dst" etype="UI64"/>
+ <parameter type="unsigned __int64" varname="a" etype="UI64"/>
+ <description>Count the number of trailing zero bits in unsigned 64-bit integer "a", and return that count in "dst".</description>
+ <operation>
+tmp := 0
+dst := 0
+DO WHILE ((tmp &lt; 64) AND a[tmp] == 0)
+ tmp := tmp + 1
+ dst := dst + 1
+OD
+ </operation>
+ <instruction name="TZCNT" form="r64, r64" xed="TZCNT_GPRv_GPRv"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_bzhi_u32">
+ <type>Integer</type>
+ <CPUID>BMI2</CPUID>
+ <category>Bit Manipulation</category>
+ <return type="unsigned int" varname="dst" etype="UI32"/>
+ <parameter type="unsigned int" varname="a" etype="UI32"/>
+ <parameter type="unsigned int" varname="index" etype="UI32"/>
+ <description>Copy all bits from unsigned 32-bit integer "a" to "dst", and reset (set to 0) the high bits in "dst" starting at "index".</description>
+ <operation>
+n := index[7:0]
+dst := a
+IF (n &lt; 32)
+ dst[31:n] := 0
+FI
+ </operation>
+ <instruction name="BZHI" form="r32, r32, r32" xed="BZHI_VGPR32d_VGPR32d_VGPR32d"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_bzhi_u64">
+ <type>Integer</type>
+ <CPUID>BMI2</CPUID>
+ <category>Bit Manipulation</category>
+ <return type="unsigned __int64" varname="dst" etype="UI64"/>
+ <parameter type="unsigned __int64" varname="a" etype="UI64"/>
+ <parameter type="unsigned int" varname="index" etype="UI32"/>
+ <description>Copy all bits from unsigned 64-bit integer "a" to "dst", and reset (set to 0) the high bits in "dst" starting at "index".</description>
+ <operation>
+n := index[7:0]
+dst := a
+IF (n &lt; 64)
+ dst[63:n] := 0
+FI
+ </operation>
+ <instruction name="BZHI" form="r64, r64, r64" xed="BZHI_VGPR64q_VGPR64q_VGPR64q"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_pdep_u32">
+ <type>Integer</type>
+ <CPUID>BMI2</CPUID>
+ <category>Bit Manipulation</category>
+ <return type="unsigned int" varname="dst" etype="UI32"/>
+ <parameter type="unsigned int" varname="a" etype="UI32"/>
+ <parameter type="unsigned int" varname="mask" etype="UI32"/>
+ <description>Deposit contiguous low bits from unsigned 32-bit integer "a" to "dst" at the corresponding bit locations specified by "mask"; all other bits in "dst" are set to zero.</description>
+ <operation>
+tmp := a
+dst := 0
+m := 0
+k := 0
+DO WHILE m &lt; 32
+ IF mask[m] == 1
+ dst[m] := tmp[k]
+ k := k + 1
+ FI
+ m := m + 1
+OD
+ </operation>
+ <instruction name="PDEP" form="r32, r32, r32" xed="PDEP_VGPR32d_VGPR32d_VGPR32d"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_pdep_u64">
+ <type>Integer</type>
+ <CPUID>BMI2</CPUID>
+ <category>Bit Manipulation</category>
+ <return type="unsigned __int64" varname="dst" etype="UI64"/>
+ <parameter type="unsigned __int64" varname="a" etype="UI64"/>
+ <parameter type="unsigned __int64" varname="mask" etype="UI64"/>
+ <description>Deposit contiguous low bits from unsigned 64-bit integer "a" to "dst" at the corresponding bit locations specified by "mask"; all other bits in "dst" are set to zero.</description>
+ <operation>
+tmp := a
+dst := 0
+m := 0
+k := 0
+DO WHILE m &lt; 64
+ IF mask[m] == 1
+ dst[m] := tmp[k]
+ k := k + 1
+ FI
+ m := m + 1
+OD
+ </operation>
+ <instruction name="PDEP" form="r64, r64, r64" xed="PDEP_VGPR64q_VGPR64q_VGPR64q"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_pext_u32">
+ <type>Integer</type>
+ <CPUID>BMI2</CPUID>
+ <category>Bit Manipulation</category>
+ <return type="unsigned int" varname="dst" etype="UI32"/>
+ <parameter type="unsigned int" varname="a" etype="UI32"/>
+ <parameter type="unsigned int" varname="mask" etype="UI32"/>
+ <description>Extract bits from unsigned 32-bit integer "a" at the corresponding bit locations specified by "mask" to contiguous low bits in "dst"; the remaining upper bits in "dst" are set to zero.</description>
+ <operation>
+tmp := a
+dst := 0
+m := 0
+k := 0
+DO WHILE m &lt; 32
+ IF mask[m] == 1
+ dst[k] := tmp[m]
+ k := k + 1
+ FI
+ m := m + 1
+OD
+ </operation>
+ <instruction name="PEXT" form="r32, r32, r32" xed="PEXT_VGPR32d_VGPR32d_VGPR32d"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_pext_u64">
+ <type>Integer</type>
+ <CPUID>BMI2</CPUID>
+ <category>Bit Manipulation</category>
+ <return type="unsigned __int64" varname="dst" etype="UI64"/>
+ <parameter type="unsigned __int64" varname="a" etype="UI64"/>
+ <parameter type="unsigned __int64" varname="mask" etype="UI64"/>
+ <description>Extract bits from unsigned 64-bit integer "a" at the corresponding bit locations specified by "mask" to contiguous low bits in "dst"; the remaining upper bits in "dst" are set to zero.</description>
+ <operation>
+tmp := a
+dst := 0
+m := 0
+k := 0
+DO WHILE m &lt; 64
+ IF mask[m] == 1
+ dst[k] := tmp[m]
+ k := k + 1
+ FI
+ m := m + 1
+OD
+ </operation>
+ <instruction name="PEXT" form="r64, r64, r64" xed="PEXT_VGPR64q_VGPR64q_VGPR64q"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_mulx_u32">
+ <type>Integer</type>
+ <CPUID>BMI2</CPUID>
+ <category>Arithmetic</category>
+ <return type="unsigned int" varname="dst" etype="UI32"/>
+ <parameter type="unsigned int" varname="a" etype="UI32"/>
+ <parameter type="unsigned int" varname="b" etype="UI32"/>
+ <parameter type="unsigned int*" varname="hi" etype="UI32" memwidth="32"/>
+ <description>Multiply unsigned 32-bit integers "a" and "b", store the low 32-bits of the result in "dst", and store the high 32-bits in "hi". This does not read or write arithmetic flags.</description>
+ <operation>
+dst[31:0] := (a * b)[31:0]
+MEM[hi+31:hi] := (a * b)[63:32]
+ </operation>
+ <instruction name="MULX" form="r32, r32, m32" xed="MULX_VGPR32d_VGPR32d_MEMd"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_mulx_u64">
+ <type>Integer</type>
+ <CPUID>BMI2</CPUID>
+ <category>Arithmetic</category>
+ <return type="unsigned __int64" varname="dst" etype="UI64"/>
+ <parameter type="unsigned __int64" varname="a" etype="UI64"/>
+ <parameter type="unsigned __int64" varname="b" etype="UI64"/>
+ <parameter type="unsigned __int64*" varname="hi" etype="UI64" memwidth="64"/>
+ <description>Multiply unsigned 64-bit integers "a" and "b", store the low 64-bits of the result in "dst", and store the high 64-bits in "hi". This does not read or write arithmetic flags.</description>
+ <operation>
+dst[63:0] := (a * b)[63:0]
+MEM[hi+63:hi] := (a * b)[127:64]
+ </operation>
+ <instruction name="MULX" form="r64, r64, m64" xed="MULX_VGPR64q_VGPR64q_MEMq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_incsspd">
+ <CPUID>CET_SS</CPUID>
+ <category>Miscellaneous</category>
+ <return type="void"/>
+ <parameter type="int" varname="a" etype="UI32"/>
+ <description>Increment the shadow stack pointer by 4 times the value specified in bits [7:0] of "a".</description>
+ <operation>
+SSP := SSP + a[7:0] * 4
+ </operation>
+ <instruction name="INCSSPD" form="r32" xed="INCSSPD_GPR32u8"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_incsspq">
+ <CPUID>CET_SS</CPUID>
+ <category>Miscellaneous</category>
+ <return type="void"/>
+ <parameter type="int" varname="a" etype="UI32"/>
+ <description>Increment the shadow stack pointer by 8 times the value specified in bits [7:0] of "a".</description>
+ <operation>
+SSP := SSP + a[7:0] * 8
+ </operation>
+ <instruction name="INCSSPQ" form="r64" xed="INCSSPQ_GPR64u8"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_rdsspd_i32">
+ <CPUID>CET_SS</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__int32" varname="dst" etype="UI32"/>
+ <parameter type="void"/>
+ <description>Read the low 32-bits of the current shadow stack pointer, and store the result in "dst".</description>
+ <operation>dst := SSP[31:0]
+ </operation>
+ <instruction name="RDSSPD" form="r32" xed="RDSSPD_GPR32u32"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_rdsspq_i64">
+ <CPUID>CET_SS</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__int64" varname="dst" etype="UI64"/>
+ <parameter type="void"/>
+ <description>Read the current shadow stack pointer, and store the result in "dst".</description>
+ <operation>dst := SSP[63:0]
+ </operation>
+ <instruction name="RDSSPQ" form="r64" xed="RDSSPQ_GPR64u64"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_saveprevssp">
+ <CPUID>CET_SS</CPUID>
+ <category>Miscellaneous</category>
+ <return type="void"/>
+ <parameter type="void"/>
+ <description>Save the previous shadow stack pointer context.</description>
+ <instruction name="SAVEPREVSSP" xed="SAVEPREVSSP"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_rstorssp">
+ <CPUID>CET_SS</CPUID>
+ <category>Miscellaneous</category>
+ <return type="void"/>
+ <parameter type="void *" varname="p"/>
+ <description>Restore the saved shadow stack pointer from the shadow stack restore token previously created on shadow stack by saveprevssp.</description>
+ <instruction name="RSTORSSP" form="m64" xed="RSTORSSP_MEMu64"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_wrssd">
+ <CPUID>CET_SS</CPUID>
+ <category>Miscellaneous</category>
+ <return type="void"/>
+ <parameter type="__int32" varname="val" etype="UI32"/>
+ <parameter type="void *" varname="p"/>
+ <description>Write 32-bit value in "val" to a shadow stack page in memory specified by "p".</description>
+ <instruction name="WRSSD" form="m32, r32" xed="WRSSD_MEMu32_GPR32u32"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_wrssq">
+ <CPUID>CET_SS</CPUID>
+ <category>Miscellaneous</category>
+ <return type="void"/>
+ <parameter type="__int64" varname="val" etype="UI64"/>
+ <parameter type="void *" varname="p"/>
+ <description>Write 64-bit value in "val" to a shadow stack page in memory specified by "p".</description>
+ <instruction name="WRSSQ" form="m64, r64" xed="WRSSQ_MEMu64_GPR64u64"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_wrussd">
+ <CPUID>CET_SS</CPUID>
+ <category>Miscellaneous</category>
+ <return type="void"/>
+ <parameter type="__int32" varname="val" etype="UI32"/>
+ <parameter type="void *" varname="p"/>
+ <description>Write 32-bit value in "val" to a user shadow stack page in memory specified by "p".</description>
+ <instruction name="WRUSSD" form="m32, r32" xed="WRUSSD_MEMu32_GPR32u32"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_wrussq">
+ <CPUID>CET_SS</CPUID>
+ <category>Miscellaneous</category>
+ <return type="void"/>
+ <parameter type="__int64" varname="val" etype="UI64"/>
+ <parameter type="void *" varname="p"/>
+ <description>Write 64-bit value in "val" to a user shadow stack page in memory specified by "p".</description>
+ <instruction name="WRUSSQ" form="m64, r64" xed="WRUSSQ_MEMu64_GPR64u64"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_setssbsy">
+ <CPUID>CET_SS</CPUID>
+ <category>Miscellaneous</category>
+ <return type="void"/>
+ <parameter type="void"/>
+ <description>Mark shadow stack pointed to by IA32_PL0_SSP as busy.</description>
+ <instruction name="SETSSBSY" xed="SETSSBSY"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_clrssbsy">
+ <CPUID>CET_SS</CPUID>
+ <category>Miscellaneous</category>
+ <return type="void"/>
+ <parameter type="void *" varname="p"/>
+ <description>Mark shadow stack pointed to by "p" as not busy.</description>
+ <instruction name="CLRSSBSY" form="m64" xed="CLRSSBSY_MEMu64"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_get_ssp">
+ <CPUID>CET_SS</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__int32" varname="dst" etype="UI32"/>
+ <parameter type="void"/>
+ <description>If CET is enabled, read the low 32-bits of the current shadow stack pointer, and store the result in "dst". Otherwise return 0.</description>
+ <operation>dst := SSP[31:0]
+ </operation>
+ <instruction name="RDSSPD" form="r32" xed="RDSSPD_GPR32u32"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_get_ssp">
+ <CPUID>CET_SS</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__int64" varname="dst" etype="UI64"/>
+ <parameter type="void"/>
+ <description>If CET is enabled, read the current shadow stack pointer, and store the result in "dst". Otherwise return 0.</description>
+ <operation>dst := SSP[63:0]
+ </operation>
+ <instruction name="RDSSPQ" form="r64" xed="RDSSPQ_GPR64u64"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_inc_ssp">
+ <CPUID>CET_SS</CPUID>
+ <category>Miscellaneous</category>
+ <return type="void"/>
+ <parameter type="unsigned int" varname="a" etype="UI32"/>
+ <description>Increment the shadow stack pointer by 4 times the value specified in bits [7:0] of "a".</description>
+ <operation>
+SSP := SSP + a[7:0] * 4
+ </operation>
+ <instruction name="INCSSPD" form="r32" xed="INCSSPD_GPR32u8"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_mm_cldemote">
+ <CPUID>CLDEMOTE</CPUID>
+ <category>Miscellaneous</category>
+ <return type="void"/>
+ <parameter type="void const *" varname="p"/>
+ <description>Hint to hardware that the cache line that contains "p" should be demoted from the cache closest to the processor core to a level more distant from the processor core.</description>
+ <instruction name="CLDEMOTE" form="m8" xed="CLDEMOTE_MEMu8"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_mm_clflushopt">
+ <CPUID>CLFLUSHOPT</CPUID>
+ <category>General Support</category>
+ <return type="void"/>
+ <parameter type="void const *" varname="p"/>
+ <description>Invalidate and flush the cache line that contains "p" from all levels of the cache hierarchy.</description>
+ <instruction name="CLFLUSHOPT" form="m8" xed="CLFLUSHOPT_MEMmprefetch"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_mm_clwb">
+ <CPUID>CLWB</CPUID>
+ <category>General Support</category>
+ <return type="void"/>
+ <parameter type="void const *" varname="p"/>
+ <description>Write back to memory the cache line that contains "p" from any level of the cache hierarchy in the cache coherence domain.</description>
+ <instruction name="CLWB" form="m8" xed="CLWB_MEMmprefetch"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="FMA" name="_mm_fmadd_pd">
+ <type>Floating Point</type>
+ <CPUID>FMA</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="__m128d" varname="c" etype="FP64"/>
+ <description>Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", add the intermediate result to packed elements in "c", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i]
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VFMADD132PD" form="xmm, xmm, xmm" xed="VFMADD132PD_XMMdq_XMMdq_XMMdq"/>
+ <instruction name="VFMADD213PD" form="xmm, xmm, xmm" xed="VFMADD213PD_XMMdq_XMMdq_XMMdq"/>
+ <instruction name="VFMADD231PD" form="xmm, xmm, xmm" xed="VFMADD231PD_XMMdq_XMMdq_XMMdq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="FMA" name="_mm256_fmadd_pd">
+ <type>Floating Point</type>
+ <CPUID>FMA</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <parameter type="__m256d" varname="b" etype="FP64"/>
+ <parameter type="__m256d" varname="c" etype="FP64"/>
+ <description>Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", add the intermediate result to packed elements in "c", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i]
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VFMADD132PD" form="ymm, ymm, ymm" xed="VFMADD132PD_YMMqq_YMMqq_YMMqq"/>
+ <instruction name="VFMADD213PD" form="ymm, ymm, ymm" xed="VFMADD213PD_YMMqq_YMMqq_YMMqq"/>
+ <instruction name="VFMADD231PD" form="ymm, ymm, ymm" xed="VFMADD231PD_YMMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="FMA" name="_mm_fmadd_ps">
+ <type>Floating Point</type>
+ <CPUID>FMA</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="__m128" varname="c" etype="FP32"/>
+ <description>Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", add the intermediate result to packed elements in "c", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i]
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VFMADD132PS" form="xmm, xmm, xmm" xed="VFMADD132PS_XMMdq_XMMdq_XMMdq"/>
+ <instruction name="VFMADD213PS" form="xmm, xmm, xmm" xed="VFMADD213PS_XMMdq_XMMdq_XMMdq"/>
+ <instruction name="VFMADD231PS" form="xmm, xmm, xmm" xed="VFMADD231PS_XMMdq_XMMdq_XMMdq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="FMA" name="_mm256_fmadd_ps">
+ <type>Floating Point</type>
+ <CPUID>FMA</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="__m256" varname="b" etype="FP32"/>
+ <parameter type="__m256" varname="c" etype="FP32"/>
+ <description>Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", add the intermediate result to packed elements in "c", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i]
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VFMADD132PS" form="ymm, ymm, ymm" xed="VFMADD132PS_YMMqq_YMMqq_YMMqq"/>
+ <instruction name="VFMADD213PS" form="ymm, ymm, ymm" xed="VFMADD213PS_YMMqq_YMMqq_YMMqq"/>
+ <instruction name="VFMADD231PS" form="ymm, ymm, ymm" xed="VFMADD231PS_YMMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="FMA" name="_mm_fmadd_sd">
+ <type>Floating Point</type>
+ <CPUID>FMA</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="__m128d" varname="c" etype="FP64"/>
+ <description>Multiply the lower double-precision (64-bit) floating-point elements in "a" and "b", and add the intermediate result to the lower element in "c". Store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst".</description>
+ <operation>
+dst[63:0] := (a[63:0] * b[63:0]) + c[63:0]
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VFMADD132SD" form="xmm, xmm, xmm" xed="VFMADD132SD_XMMdq_XMMq_XMMq"/>
+ <instruction name="VFMADD213SD" form="xmm, xmm, xmm" xed="VFMADD213SD_XMMdq_XMMq_XMMq"/>
+ <instruction name="VFMADD231SD" form="xmm, xmm, xmm" xed="VFMADD231SD_XMMdq_XMMq_XMMq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="FMA" name="_mm_fmadd_ss">
+ <type>Floating Point</type>
+ <CPUID>FMA</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="__m128" varname="c" etype="FP32"/>
+ <description>Multiply the lower single-precision (32-bit) floating-point elements in "a" and "b", and add the intermediate result to the lower element in "c". Store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst".</description>
+ <operation>
+dst[31:0] := (a[31:0] * b[31:0]) + c[31:0]
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VFMADD132SS" form="xmm, xmm, xmm" xed="VFMADD132SS_XMMdq_XMMd_XMMd"/>
+ <instruction name="VFMADD213SS" form="xmm, xmm, xmm" xed="VFMADD213SS_XMMdq_XMMd_XMMd"/>
+ <instruction name="VFMADD231SS" form="xmm, xmm, xmm" xed="VFMADD231SS_XMMdq_XMMd_XMMd"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="FMA" name="_mm_fmaddsub_pd">
+ <type>Floating Point</type>
+ <CPUID>FMA</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="__m128d" varname="c" etype="FP64"/>
+ <description>Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", alternatively add and subtract packed elements in "c" to/from the intermediate result, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF ((j &amp; 1) == 0)
+ dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i]
+ ELSE
+ dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VFMADDSUB132PD" form="xmm, xmm, xmm" xed="VFMADDSUB132PD_XMMdq_XMMdq_XMMdq"/>
+ <instruction name="VFMADDSUB213PD" form="xmm, xmm, xmm" xed="VFMADDSUB213PD_XMMdq_XMMdq_XMMdq"/>
+ <instruction name="VFMADDSUB231PD" form="xmm, xmm, xmm" xed="VFMADDSUB231PD_XMMdq_XMMdq_XMMdq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="FMA" name="_mm256_fmaddsub_pd">
+ <type>Floating Point</type>
+ <CPUID>FMA</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <parameter type="__m256d" varname="b" etype="FP64"/>
+ <parameter type="__m256d" varname="c" etype="FP64"/>
+ <description>Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", alternatively add and subtract packed elements in "c" to/from the intermediate result, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF ((j &amp; 1) == 0)
+ dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i]
+ ELSE
+ dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VFMADDSUB132PD" form="ymm, ymm, ymm" xed="VFMADDSUB132PD_YMMqq_YMMqq_YMMqq"/>
+ <instruction name="VFMADDSUB213PD" form="ymm, ymm, ymm" xed="VFMADDSUB213PD_YMMqq_YMMqq_YMMqq"/>
+ <instruction name="VFMADDSUB231PD" form="ymm, ymm, ymm" xed="VFMADDSUB231PD_YMMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="FMA" name="_mm_fmaddsub_ps">
+ <type>Floating Point</type>
+ <CPUID>FMA</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="__m128" varname="c" etype="FP32"/>
+ <description>Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", alternatively add and subtract packed elements in "c" to/from the intermediate result, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF ((j &amp; 1) == 0)
+ dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i]
+ ELSE
+ dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VFMADDSUB132PS" form="xmm, xmm, xmm" xed="VFMADDSUB132PS_XMMdq_XMMdq_XMMdq"/>
+ <instruction name="VFMADDSUB213PS" form="xmm, xmm, xmm" xed="VFMADDSUB213PS_XMMdq_XMMdq_XMMdq"/>
+ <instruction name="VFMADDSUB231PS" form="xmm, xmm, xmm" xed="VFMADDSUB231PS_XMMdq_XMMdq_XMMdq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="FMA" name="_mm256_fmaddsub_ps">
+ <type>Floating Point</type>
+ <CPUID>FMA</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="__m256" varname="b" etype="FP32"/>
+ <parameter type="__m256" varname="c" etype="FP32"/>
+ <description>Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", alternatively add and subtract packed elements in "c" to/from the intermediate result, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF ((j &amp; 1) == 0)
+ dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i]
+ ELSE
+ dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VFMADDSUB132PS" form="ymm, ymm, ymm" xed="VFMADDSUB132PS_YMMqq_YMMqq_YMMqq"/>
+ <instruction name="VFMADDSUB213PS" form="ymm, ymm, ymm" xed="VFMADDSUB213PS_YMMqq_YMMqq_YMMqq"/>
+ <instruction name="VFMADDSUB231PS" form="ymm, ymm, ymm" xed="VFMADDSUB231PS_YMMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="FMA" name="_mm_fmsub_pd">
+ <type>Floating Point</type>
+ <CPUID>FMA</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="__m128d" varname="c" etype="FP64"/>
+ <description>Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the intermediate result, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i]
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VFMSUB132PD" form="xmm, xmm, xmm" xed="VFMSUB132PD_XMMdq_XMMdq_XMMdq"/>
+ <instruction name="VFMSUB213PD" form="xmm, xmm, xmm" xed="VFMSUB213PD_XMMdq_XMMdq_XMMdq"/>
+ <instruction name="VFMSUB231PD" form="xmm, xmm, xmm" xed="VFMSUB231PD_XMMdq_XMMdq_XMMdq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="FMA" name="_mm256_fmsub_pd">
+ <type>Floating Point</type>
+ <CPUID>FMA</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <parameter type="__m256d" varname="b" etype="FP64"/>
+ <parameter type="__m256d" varname="c" etype="FP64"/>
+ <description>Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the intermediate result, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i]
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VFMSUB132PD" form="ymm, ymm, ymm" xed="VFMSUB132PD_YMMqq_YMMqq_YMMqq"/>
+ <instruction name="VFMSUB213PD" form="ymm, ymm, ymm" xed="VFMSUB213PD_YMMqq_YMMqq_YMMqq"/>
+ <instruction name="VFMSUB231PD" form="ymm, ymm, ymm" xed="VFMSUB231PD_YMMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="FMA" name="_mm_fmsub_ps">
+ <type>Floating Point</type>
+ <CPUID>FMA</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="__m128" varname="c" etype="FP32"/>
+ <description>Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the intermediate result, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i]
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VFMSUB132PS" form="xmm, xmm, xmm" xed="VFMSUB132PS_XMMdq_XMMdq_XMMdq"/>
+ <instruction name="VFMSUB213PS" form="xmm, xmm, xmm" xed="VFMSUB213PS_XMMdq_XMMdq_XMMdq"/>
+ <instruction name="VFMSUB231PS" form="xmm, xmm, xmm" xed="VFMSUB231PS_XMMdq_XMMdq_XMMdq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="FMA" name="_mm256_fmsub_ps">
+ <type>Floating Point</type>
+ <CPUID>FMA</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="__m256" varname="b" etype="FP32"/>
+ <parameter type="__m256" varname="c" etype="FP32"/>
+ <description>Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the intermediate result, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i]
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VFMSUB132PS" form="ymm, ymm, ymm" xed="VFMSUB132PS_YMMqq_YMMqq_YMMqq"/>
+ <instruction name="VFMSUB213PS" form="ymm, ymm, ymm" xed="VFMSUB213PS_YMMqq_YMMqq_YMMqq"/>
+ <instruction name="VFMSUB231PS" form="ymm, ymm, ymm" xed="VFMSUB231PS_YMMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="FMA" name="_mm_fmsub_sd">
+ <type>Floating Point</type>
+ <CPUID>FMA</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="__m128d" varname="c" etype="FP64"/>
+ <description>Multiply the lower double-precision (64-bit) floating-point elements in "a" and "b", and subtract the lower element in "c" from the intermediate result. Store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst".</description>
+ <operation>
+dst[63:0] := (a[63:0] * b[63:0]) - c[63:0]
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VFMSUB132SD" form="xmm, xmm, xmm" xed="VFMSUB132SD_XMMdq_XMMq_XMMq"/>
+ <instruction name="VFMSUB213SD" form="xmm, xmm, xmm" xed="VFMSUB213SD_XMMdq_XMMq_XMMq"/>
+ <instruction name="VFMSUB231SD" form="xmm, xmm, xmm" xed="VFMSUB231SD_XMMdq_XMMq_XMMq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="FMA" name="_mm_fmsub_ss">
+ <type>Floating Point</type>
+ <CPUID>FMA</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="__m128" varname="c" etype="FP32"/>
+ <description>Multiply the lower single-precision (32-bit) floating-point elements in "a" and "b", and subtract the lower element in "c" from the intermediate result. Store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst".</description>
+ <operation>
+dst[31:0] := (a[31:0] * b[31:0]) - c[31:0]
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VFMSUB132SS" form="xmm, xmm, xmm" xed="VFMSUB132SS_XMMdq_XMMd_XMMd"/>
+ <instruction name="VFMSUB213SS" form="xmm, xmm, xmm" xed="VFMSUB213SS_XMMdq_XMMd_XMMd"/>
+ <instruction name="VFMSUB231SS" form="xmm, xmm, xmm" xed="VFMSUB231SS_XMMdq_XMMd_XMMd"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="FMA" name="_mm_fmsubadd_pd">
+ <type>Floating Point</type>
+ <CPUID>FMA</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="__m128d" varname="c" etype="FP64"/>
+ <description>Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", alternatively subtract and add packed elements in "c" from/to the intermediate result, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF ((j &amp; 1) == 0)
+ dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i]
+ ELSE
+ dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VFMSUBADD132PD" form="xmm, xmm, xmm" xed="VFMSUBADD132PD_XMMdq_XMMdq_XMMdq"/>
+ <instruction name="VFMSUBADD213PD" form="xmm, xmm, xmm" xed="VFMSUBADD213PD_XMMdq_XMMdq_XMMdq"/>
+ <instruction name="VFMSUBADD231PD" form="xmm, xmm, xmm" xed="VFMSUBADD231PD_XMMdq_XMMdq_XMMdq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="FMA" name="_mm256_fmsubadd_pd">
+ <type>Floating Point</type>
+ <CPUID>FMA</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <parameter type="__m256d" varname="b" etype="FP64"/>
+ <parameter type="__m256d" varname="c" etype="FP64"/>
+ <description>Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", alternatively subtract and add packed elements in "c" from/to the intermediate result, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ IF ((j &amp; 1) == 0)
+ dst[i+63:i] := (a[i+63:i] * b[i+63:i]) + c[i+63:i]
+ ELSE
+ dst[i+63:i] := (a[i+63:i] * b[i+63:i]) - c[i+63:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VFMSUBADD132PD" form="ymm, ymm, ymm" xed="VFMSUBADD132PD_YMMqq_YMMqq_YMMqq"/>
+ <instruction name="VFMSUBADD213PD" form="ymm, ymm, ymm" xed="VFMSUBADD213PD_YMMqq_YMMqq_YMMqq"/>
+ <instruction name="VFMSUBADD231PD" form="ymm, ymm, ymm" xed="VFMSUBADD231PD_YMMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="FMA" name="_mm_fmsubadd_ps">
+ <type>Floating Point</type>
+ <CPUID>FMA</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="__m128" varname="c" etype="FP32"/>
+ <description>Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", alternatively subtract and add packed elements in "c" from/to the intermediate result, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF ((j &amp; 1) == 0)
+ dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i]
+ ELSE
+ dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VFMSUBADD132PS" form="xmm, xmm, xmm" xed="VFMSUBADD132PS_XMMdq_XMMdq_XMMdq"/>
+ <instruction name="VFMSUBADD213PS" form="xmm, xmm, xmm" xed="VFMSUBADD213PS_XMMdq_XMMdq_XMMdq"/>
+ <instruction name="VFMSUBADD231PS" form="xmm, xmm, xmm" xed="VFMSUBADD231PS_XMMdq_XMMdq_XMMdq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="FMA" name="_mm256_fmsubadd_ps">
+ <type>Floating Point</type>
+ <CPUID>FMA</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="__m256" varname="b" etype="FP32"/>
+ <parameter type="__m256" varname="c" etype="FP32"/>
+ <description>Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", alternatively subtract and add packed elements in "c" from/to the intermediate result, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ IF ((j &amp; 1) == 0)
+ dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i]
+ ELSE
+ dst[i+31:i] := (a[i+31:i] * b[i+31:i]) - c[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VFMSUBADD132PS" form="ymm, ymm, ymm" xed="VFMSUBADD132PS_YMMqq_YMMqq_YMMqq"/>
+ <instruction name="VFMSUBADD213PS" form="ymm, ymm, ymm" xed="VFMSUBADD213PS_YMMqq_YMMqq_YMMqq"/>
+ <instruction name="VFMSUBADD231PS" form="ymm, ymm, ymm" xed="VFMSUBADD231PS_YMMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="FMA" name="_mm_fnmadd_pd">
+ <type>Floating Point</type>
+ <CPUID>FMA</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="__m128d" varname="c" etype="FP64"/>
+ <description>Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", add the negated intermediate result to packed elements in "c", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ dst[i+63:i] := -(a[i+63:i] * b[i+63:i]) + c[i+63:i]
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VFNMADD132PD" form="xmm, xmm, xmm" xed="VFNMADD132PD_XMMdq_XMMdq_XMMdq"/>
+ <instruction name="VFNMADD213PD" form="xmm, xmm, xmm" xed="VFNMADD213PD_XMMdq_XMMdq_XMMdq"/>
+ <instruction name="VFNMADD231PD" form="xmm, xmm, xmm" xed="VFNMADD231PD_XMMdq_XMMdq_XMMdq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="FMA" name="_mm256_fnmadd_pd">
+ <type>Floating Point</type>
+ <CPUID>FMA</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <parameter type="__m256d" varname="b" etype="FP64"/>
+ <parameter type="__m256d" varname="c" etype="FP64"/>
+ <description>Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", add the negated intermediate result to packed elements in "c", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ dst[i+63:i] := -(a[i+63:i] * b[i+63:i]) + c[i+63:i]
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VFNMADD132PD" form="ymm, ymm, ymm" xed="VFNMADD132PD_YMMqq_YMMqq_YMMqq"/>
+ <instruction name="VFNMADD213PD" form="ymm, ymm, ymm" xed="VFNMADD213PD_YMMqq_YMMqq_YMMqq"/>
+ <instruction name="VFNMADD231PD" form="ymm, ymm, ymm" xed="VFNMADD231PD_YMMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="FMA" name="_mm_fnmadd_ps">
+ <type>Floating Point</type>
+ <CPUID>FMA</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="__m128" varname="c" etype="FP32"/>
+ <description>Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", add the negated intermediate result to packed elements in "c", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ dst[i+31:i] := -(a[i+31:i] * b[i+31:i]) + c[i+31:i]
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VFNMADD132PS" form="xmm, xmm, xmm" xed="VFNMADD132PS_XMMdq_XMMdq_XMMdq"/>
+ <instruction name="VFNMADD213PS" form="xmm, xmm, xmm" xed="VFNMADD213PS_XMMdq_XMMdq_XMMdq"/>
+ <instruction name="VFNMADD231PS" form="xmm, xmm, xmm" xed="VFNMADD231PS_XMMdq_XMMdq_XMMdq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="FMA" name="_mm256_fnmadd_ps">
+ <type>Floating Point</type>
+ <CPUID>FMA</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="__m256" varname="b" etype="FP32"/>
+ <parameter type="__m256" varname="c" etype="FP32"/>
+ <description>Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", add the negated intermediate result to packed elements in "c", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ dst[i+31:i] := -(a[i+31:i] * b[i+31:i]) + c[i+31:i]
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VFNMADD132PS" form="ymm, ymm, ymm" xed="VFNMADD132PS_YMMqq_YMMqq_YMMqq"/>
+ <instruction name="VFNMADD213PS" form="ymm, ymm, ymm" xed="VFNMADD213PS_YMMqq_YMMqq_YMMqq"/>
+ <instruction name="VFNMADD231PS" form="ymm, ymm, ymm" xed="VFNMADD231PS_YMMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="FMA" name="_mm_fnmadd_sd">
+ <type>Floating Point</type>
+ <CPUID>FMA</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="__m128d" varname="c" etype="FP64"/>
+ <description>Multiply the lower double-precision (64-bit) floating-point elements in "a" and "b", and add the negated intermediate result to the lower element in "c". Store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst".</description>
+ <operation>
+dst[63:0] := -(a[63:0] * b[63:0]) + c[63:0]
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VFNMADD132SD" form="xmm, xmm, xmm" xed="VFNMADD132SD_XMMdq_XMMq_XMMq"/>
+ <instruction name="VFNMADD213SD" form="xmm, xmm, xmm" xed="VFNMADD213SD_XMMdq_XMMq_XMMq"/>
+ <instruction name="VFNMADD231SD" form="xmm, xmm, xmm" xed="VFNMADD231SD_XMMdq_XMMq_XMMq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="FMA" name="_mm_fnmadd_ss">
+ <type>Floating Point</type>
+ <CPUID>FMA</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="__m128" varname="c" etype="FP32"/>
+ <description>Multiply the lower single-precision (32-bit) floating-point elements in "a" and "b", and add the negated intermediate result to the lower element in "c". Store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst".</description>
+ <operation>
+dst[31:0] := -(a[31:0] * b[31:0]) + c[31:0]
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VFNMADD132SS" form="xmm, xmm, xmm" xed="VFNMADD132SS_XMMdq_XMMd_XMMd"/>
+ <instruction name="VFNMADD213SS" form="xmm, xmm, xmm" xed="VFNMADD213SS_XMMdq_XMMd_XMMd"/>
+ <instruction name="VFNMADD231SS" form="xmm, xmm, xmm" xed="VFNMADD231SS_XMMdq_XMMd_XMMd"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="FMA" name="_mm_fnmsub_pd">
+ <type>Floating Point</type>
+ <CPUID>FMA</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="__m128d" varname="c" etype="FP64"/>
+ <description>Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the negated intermediate result, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ dst[i+63:i] := -(a[i+63:i] * b[i+63:i]) - c[i+63:i]
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VFNMSUB132PD" form="xmm, xmm, xmm" xed="VFNMSUB132PD_XMMdq_XMMdq_XMMdq"/>
+ <instruction name="VFNMSUB213PD" form="xmm, xmm, xmm" xed="VFNMSUB213PD_XMMdq_XMMdq_XMMdq"/>
+ <instruction name="VFNMSUB231PD" form="xmm, xmm, xmm" xed="VFNMSUB231PD_XMMdq_XMMdq_XMMdq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="FMA" name="_mm256_fnmsub_pd">
+ <type>Floating Point</type>
+ <CPUID>FMA</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256d" varname="dst" etype="FP64"/>
+ <parameter type="__m256d" varname="a" etype="FP64"/>
+ <parameter type="__m256d" varname="b" etype="FP64"/>
+ <parameter type="__m256d" varname="c" etype="FP64"/>
+ <description>Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the negated intermediate result, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*64
+ dst[i+63:i] := -(a[i+63:i] * b[i+63:i]) - c[i+63:i]
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VFNMSUB132PD" form="ymm, ymm, ymm" xed="VFNMSUB132PD_YMMqq_YMMqq_YMMqq"/>
+ <instruction name="VFNMSUB213PD" form="ymm, ymm, ymm" xed="VFNMSUB213PD_YMMqq_YMMqq_YMMqq"/>
+ <instruction name="VFNMSUB231PD" form="ymm, ymm, ymm" xed="VFNMSUB231PD_YMMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="FMA" name="_mm_fnmsub_ps">
+ <type>Floating Point</type>
+ <CPUID>FMA</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="__m128" varname="c" etype="FP32"/>
+ <description>Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the negated intermediate result, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ dst[i+31:i] := -(a[i+31:i] * b[i+31:i]) - c[i+31:i]
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VFNMSUB132PS" form="xmm, xmm, xmm" xed="VFNMSUB132PS_XMMdq_XMMdq_XMMdq"/>
+ <instruction name="VFNMSUB213PS" form="xmm, xmm, xmm" xed="VFNMSUB213PS_XMMdq_XMMdq_XMMdq"/>
+ <instruction name="VFNMSUB231PS" form="xmm, xmm, xmm" xed="VFNMSUB231PS_XMMdq_XMMdq_XMMdq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="FMA" name="_mm256_fnmsub_ps">
+ <type>Floating Point</type>
+ <CPUID>FMA</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="__m256" varname="b" etype="FP32"/>
+ <parameter type="__m256" varname="c" etype="FP32"/>
+ <description>Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", subtract packed elements in "c" from the negated intermediate result, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ dst[i+31:i] := -(a[i+31:i] * b[i+31:i]) - c[i+31:i]
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VFNMSUB132PS" form="ymm, ymm, ymm" xed="VFNMSUB132PS_YMMqq_YMMqq_YMMqq"/>
+ <instruction name="VFNMSUB213PS" form="ymm, ymm, ymm" xed="VFNMSUB213PS_YMMqq_YMMqq_YMMqq"/>
+ <instruction name="VFNMSUB231PS" form="ymm, ymm, ymm" xed="VFNMSUB231PS_YMMqq_YMMqq_YMMqq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="FMA" name="_mm_fnmsub_sd">
+ <type>Floating Point</type>
+ <CPUID>FMA</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="__m128d" varname="c" etype="FP64"/>
+ <description>Multiply the lower double-precision (64-bit) floating-point elements in "a" and "b", and subtract the lower element in "c" from the negated intermediate result. Store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst".</description>
+ <operation>
+dst[63:0] := -(a[63:0] * b[63:0]) - c[63:0]
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VFNMSUB132SD" form="xmm, xmm, xmm" xed="VFNMSUB132SD_XMMdq_XMMq_XMMq"/>
+ <instruction name="VFNMSUB213SD" form="xmm, xmm, xmm" xed="VFNMSUB213SD_XMMdq_XMMq_XMMq"/>
+ <instruction name="VFNMSUB231SD" form="xmm, xmm, xmm" xed="VFNMSUB231SD_XMMdq_XMMq_XMMq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="FMA" name="_mm_fnmsub_ss">
+ <type>Floating Point</type>
+ <CPUID>FMA</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="__m128" varname="c" etype="FP32"/>
+ <description>Multiply the lower single-precision (32-bit) floating-point elements in "a" and "b", and subtract the lower element in "c" from the negated intermediate result. Store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst".</description>
+ <operation>
+dst[31:0] := -(a[31:0] * b[31:0]) - c[31:0]
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VFNMSUB132SS" form="xmm, xmm, xmm" xed="VFNMSUB132SS_XMMdq_XMMd_XMMd"/>
+ <instruction name="VFNMSUB213SS" form="xmm, xmm, xmm" xed="VFNMSUB213SS_XMMdq_XMMd_XMMd"/>
+ <instruction name="VFNMSUB231SS" form="xmm, xmm, xmm" xed="VFNMSUB231SS_XMMdq_XMMd_XMMd"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_mm256_cvtph_ps">
+ <type>Floating Point</type>
+ <CPUID>FP16C</CPUID>
+ <category>Convert</category>
+ <return type="__m256" varname="dst" etype="FP32"/>
+ <parameter type="__m128i" varname="a" etype="FP16"/>
+ <description>Convert packed half-precision (16-bit) floating-point elements in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ m := j*16
+ dst[i+31:i] := Convert_FP16_To_FP32(a[m+15:m])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VCVTPH2PS" form="ymm, xmm" xed="VCVTPH2PS_YMMqq_XMMdq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_mm256_cvtps_ph">
+ <type>Floating Point</type>
+ <CPUID>FP16C</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="FP16"/>
+ <parameter type="__m256" varname="a" etype="FP32"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst".
+ [sae_note]</description>
+ <operation>
+FOR j := 0 to 7
+ i := 16*j
+ l := 32*j
+ dst[i+15:i] := Convert_FP32_To_FP16(a[l+31:l])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VCVTPS2PH" form="xmm, ymm, imm8" xed="VCVTPS2PH_XMMdq_YMMqq_IMMb"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_mm_cvtph_ps">
+ <type>Floating Point</type>
+ <CPUID>FP16C</CPUID>
+ <category>Convert</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128i" varname="a" etype="FP16"/>
+ <description>Convert packed half-precision (16-bit) floating-point elements in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ m := j*16
+ dst[i+31:i] := Convert_FP16_To_FP32(a[m+15:m])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VCVTPH2PS" form="xmm, xmm" xed="VCVTPH2PS_XMMdq_XMMq"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_mm_cvtps_ph">
+ <type>Floating Point</type>
+ <CPUID>FP16C</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="FP16"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="int" varname="sae" etype="IMM" hint="TRUE" immtype="_MM_FROUND_SAE"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed half-precision (16-bit) floating-point elements, and store the results in "dst".
+ [sae_note]</description>
+ <operation>
+FOR j := 0 to 3
+ i := 16*j
+ l := 32*j
+ dst[i+15:i] := Convert_FP32_To_FP16(a[l+31:l])
+ENDFOR
+dst[MAX:64] := 0
+ </operation>
+ <instruction name="VCVTPS2PH" form="xmm, xmm, imm8" xed="VCVTPS2PH_XMMq_XMMdq_IMMb"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_readfsbase_u32">
+ <type>Integer</type>
+ <CPUID>FSGSBASE</CPUID>
+ <category>General Support</category>
+ <return type="unsigned int" varname="dst" etype="UI32"/>
+ <description>Read the FS segment base register and store the 32-bit result in "dst".</description>
+ <operation>dst[31:0] := FS_Segment_Base_Register
+dst[63:32] := 0
+ </operation>
+ <instruction name="RDFSBASE" form="r32" xed="RDFSBASE_GPRy"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_readfsbase_u64">
+ <type>Integer</type>
+ <CPUID>FSGSBASE</CPUID>
+ <category>General Support</category>
+ <return type="unsigned __int64" varname="dst" etype="UI64"/>
+ <description>Read the FS segment base register and store the 64-bit result in "dst".</description>
+ <operation>dst[63:0] := FS_Segment_Base_Register
+ </operation>
+ <instruction name="RDFSBASE" form="r64" xed="RDFSBASE_GPRy"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_readgsbase_u32">
+ <type>Integer</type>
+ <CPUID>FSGSBASE</CPUID>
+ <category>General Support</category>
+ <return type="unsigned int" varname="dst" etype="UI32"/>
+ <description>Read the GS segment base register and store the 32-bit result in "dst".</description>
+ <operation>dst[31:0] := GS_Segment_Base_Register
+dst[63:32] := 0
+ </operation>
+ <instruction name="RDGSBASE" form="r32" xed="RDGSBASE_GPRy"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_readgsbase_u64">
+ <type>Integer</type>
+ <CPUID>FSGSBASE</CPUID>
+ <category>General Support</category>
+ <return type="unsigned __int64" varname="dst" etype="UI64"/>
+ <description>Read the GS segment base register and store the 64-bit result in "dst".</description>
+ <operation>dst[63:0] := GS_Segment_Base_Register
+ </operation>
+ <instruction name="RDGSBASE" form="r64" xed="RDGSBASE_GPRy"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_writefsbase_u32">
+ <type>Integer</type>
+ <CPUID>FSGSBASE</CPUID>
+ <category>General Support</category>
+ <return type="void"/>
+ <parameter type="unsigned int" varname="a" etype="UI32"/>
+ <description>Write the unsigned 32-bit integer "a" to the FS segment base register.</description>
+ <operation>
+FS_Segment_Base_Register[31:0] := a[31:0]
+FS_Segment_Base_Register[63:32] := 0
+ </operation>
+ <instruction name="WRFSBASE" form="r32" xed="WRFSBASE_GPRy"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_writefsbase_u64">
+ <type>Integer</type>
+ <CPUID>FSGSBASE</CPUID>
+ <category>General Support</category>
+ <return type="void"/>
+ <parameter type="unsigned __int64" varname="a" etype="UI64"/>
+ <description>Write the unsigned 64-bit integer "a" to the FS segment base register.</description>
+ <operation>
+FS_Segment_Base_Register[63:0] := a[63:0]
+ </operation>
+ <instruction name="WRFSBASE" form="r64" xed="WRFSBASE_GPRy"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_writegsbase_u32">
+ <type>Integer</type>
+ <CPUID>FSGSBASE</CPUID>
+ <category>General Support</category>
+ <return type="void"/>
+ <parameter type="unsigned int" varname="a" etype="UI32"/>
+ <description>Write the unsigned 32-bit integer "a" to the GS segment base register.</description>
+ <operation>
+GS_Segment_Base_Register[31:0] := a[31:0]
+GS_Segment_Base_Register[63:32] := 0
+ </operation>
+ <instruction name="WRGSBASE" form="r32" xed="WRGSBASE_GPRy"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_writegsbase_u64">
+ <type>Integer</type>
+ <CPUID>FSGSBASE</CPUID>
+ <category>General Support</category>
+ <return type="void"/>
+ <parameter type="unsigned __int64" varname="a" etype="UI64"/>
+ <description>Write the unsigned 64-bit integer "a" to the GS segment base register.</description>
+ <operation>
+GS_Segment_Base_Register[63:0] := a[63:0]
+ </operation>
+ <instruction name="WRGSBASE" form="r64" xed="WRGSBASE_GPRy"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_fxrstor">
+ <CPUID>FXSR</CPUID>
+ <category>OS-Targeted</category>
+ <return type="void"/>
+ <parameter type="void *" varname="mem_addr" memwidth="4096"/>
+ <description>Reload the x87 FPU, MMX technology, XMM, and MXCSR registers from the 512-byte memory image at "mem_addr". This data should have been written to memory previously using the FXSAVE instruction, and in the same format as required by the operating mode. "mem_addr" must be aligned on a 16-byte boundary.</description>
+ <operation>state_x87_fpu_mmx_sse := fxrstor(MEM[mem_addr+512*8:mem_addr])
+ </operation>
+ <instruction name="FXRSTOR" form="m512" xed="FXRSTOR_MEMmfpxenv"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_fxrstor64">
+ <CPUID>FXSR</CPUID>
+ <category>OS-Targeted</category>
+ <return type="void"/>
+ <parameter type="void *" varname="mem_addr" memwidth="4096"/>
+ <description>Reload the x87 FPU, MMX technology, XMM, and MXCSR registers from the 512-byte memory image at "mem_addr". This data should have been written to memory previously using the FXSAVE64 instruction, and in the same format as required by the operating mode. "mem_addr" must be aligned on a 16-byte boundary.</description>
+ <operation>state_x87_fpu_mmx_sse := fxrstor64(MEM[mem_addr+512*8:mem_addr])
+ </operation>
+ <instruction name="FXRSTOR64" form="m512" xed="FXRSTOR64_MEMmfpxenv"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_fxsave">
+ <CPUID>FXSR</CPUID>
+ <category>OS-Targeted</category>
+ <return type="void"/>
+ <parameter type="void *" varname="mem_addr" memwidth="4096"/>
+ <description>Save the current state of the x87 FPU, MMX technology, XMM, and MXCSR registers to a 512-byte memory location at "mem_addr". The layout of the 512-byte region depends on the operating mode. Bytes [511:464] are available for software use and will not be overwritten by the processor.</description>
+ <operation>MEM[mem_addr+512*8:mem_addr] := fxsave(state_x87_fpu_mmx_sse)
+ </operation>
+ <instruction name="FXSAVE" form="m512" xed="FXSAVE_MEMmfpxenv"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_fxsave64">
+ <CPUID>FXSR</CPUID>
+ <category>OS-Targeted</category>
+ <return type="void"/>
+ <parameter type="void *" varname="mem_addr" memwidth="4096"/>
+ <description>Save the current state of the x87 FPU, MMX technology, XMM, and MXCSR registers to a 512-byte memory location at "mem_addr". The layout of the 512-byte region depends on the operating mode. Bytes [511:464] are available for software use and will not be overwritten by the processor.</description>
+ <operation>MEM[mem_addr+512*8:mem_addr] := fxsave64(state_x87_fpu_mmx_sse)
+ </operation>
+ <instruction name="FXSAVE64" form="m512" xed="FXSAVE64_MEMmfpxenv"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_mm512_maskz_gf2p8mul_epi8">
+ <type>Integer</type>
+ <CPUID>GFNI</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI8"/>
+ <parameter type="__mmask64" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI8"/>
+ <parameter type="__m512i" varname="b" etype="UI8"/>
+ <description>Multiply the packed 8-bit integers in "a" and "b" in the finite field GF(2^8), and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). The field GF(2^8) is represented in polynomial representation with the reduction polynomial x^8 + x^4 + x^3 + x + 1.</description>
+ <operation>
+DEFINE gf2p8mul_byte(src1byte, src2byte) {
+ tword := 0
+ FOR i := 0 to 7
+ IF src2byte.bit[i]
+ tword := tword XOR (src1byte &lt;&lt; i)
+ FI
+ ENDFOR
+ FOR i := 14 downto 8
+ p := 0x11B &lt;&lt; (i-8)
+ IF tword.bit[i]
+ tword := tword XOR p
+ FI
+ ENDFOR
+ RETURN tword.byte[0]
+}
+FOR j := 0 TO 63
+ IF k[j]
+ dst.byte[j] := gf2p8mul_byte(a.byte[j], b.byte[j])
+ ELSE
+ dst.byte[j] := 0
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VGF2P8MULB" form="zmm {z}, zmm, zmm" xed="VGF2P8MULB_ZMMu8_MASKmskw_ZMMu8_ZMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_mm512_mask_gf2p8mul_epi8">
+ <type>Integer</type>
+ <CPUID>GFNI</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI8"/>
+ <parameter type="__m512i" varname="src" etype="UI8"/>
+ <parameter type="__mmask64" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI8"/>
+ <parameter type="__m512i" varname="b" etype="UI8"/>
+ <description>Multiply the packed 8-bit integers in "a" and "b" in the finite field GF(2^8), and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). The field GF(2^8) is represented in polynomial representation with the reduction polynomial x^8 + x^4 + x^3 + x + 1.</description>
+ <operation>
+DEFINE gf2p8mul_byte(src1byte, src2byte) {
+ tword := 0
+ FOR i := 0 to 7
+ IF src2byte.bit[i]
+ tword := tword XOR (src1byte &lt;&lt; i)
+ FI
+ ENDFOR
+ FOR i := 14 downto 8
+ p := 0x11B &lt;&lt; (i-8)
+ IF tword.bit[i]
+ tword := tword XOR p
+ FI
+ ENDFOR
+ RETURN tword.byte[0]
+}
+FOR j := 0 TO 63
+ IF k[j]
+ dst.byte[j] := gf2p8mul_byte(a.byte[j], b.byte[j])
+ ELSE
+ dst.byte[j] := src.byte[j]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VGF2P8MULB" form="zmm {k}, zmm, zmm" xed="VGF2P8MULB_ZMMu8_MASKmskw_ZMMu8_ZMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_mm512_gf2p8mul_epi8">
+ <type>Integer</type>
+ <CPUID>GFNI</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI8"/>
+ <parameter type="__m512i" varname="a" etype="UI8"/>
+ <parameter type="__m512i" varname="b" etype="UI8"/>
+ <description>Multiply the packed 8-bit integers in "a" and "b" in the finite field GF(2^8), and store the results in "dst". The field GF(2^8) is represented in polynomial representation with the reduction polynomial x^8 + x^4 + x^3 + x + 1.</description>
+ <operation>
+DEFINE gf2p8mul_byte(src1byte, src2byte) {
+ tword := 0
+ FOR i := 0 to 7
+ IF src2byte.bit[i]
+ tword := tword XOR (src1byte &lt;&lt; i)
+ FI
+ ENDFOR
+ FOR i := 14 downto 8
+ p := 0x11B &lt;&lt; (i-8)
+ IF tword.bit[i]
+ tword := tword XOR p
+ FI
+ ENDFOR
+ RETURN tword.byte[0]
+}
+FOR j := 0 TO 63
+ dst.byte[j] := gf2p8mul_byte(a.byte[j], b.byte[j])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VGF2P8MULB" form="zmm, zmm, zmm" xed="VGF2P8MULB_ZMMu8_MASKmskw_ZMMu8_ZMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_mm256_maskz_gf2p8mul_epi8">
+ <type>Integer</type>
+ <CPUID>GFNI</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI8"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI8"/>
+ <parameter type="__m256i" varname="b" etype="UI8"/>
+ <description>Multiply the packed 8-bit integers in "a" and "b" in the finite field GF(2^8), and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). The field GF(2^8) is represented in polynomial representation with the reduction polynomial x^8 + x^4 + x^3 + x + 1.</description>
+ <operation>
+DEFINE gf2p8mul_byte(src1byte, src2byte) {
+ tword := 0
+ FOR i := 0 to 7
+ IF src2byte.bit[i]
+ tword := tword XOR (src1byte &lt;&lt; i)
+ FI
+ ENDFOR
+ FOR i := 14 downto 8
+ p := 0x11B &lt;&lt; (i-8)
+ IF tword.bit[i]
+ tword := tword XOR p
+ FI
+ ENDFOR
+ RETURN tword.byte[0]
+}
+FOR j := 0 TO 31
+ IF k[j]
+ dst.byte[j] := gf2p8mul_byte(a.byte[j], b.byte[j])
+ ELSE
+ dst.byte[j] := 0
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VGF2P8MULB" form="ymm {z}, ymm, ymm" xed="VGF2P8MULB_YMMu8_MASKmskw_YMMu8_YMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_mm256_mask_gf2p8mul_epi8">
+ <type>Integer</type>
+ <CPUID>GFNI</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI8"/>
+ <parameter type="__m256i" varname="src" etype="UI8"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="a" etype="UI8"/>
+ <parameter type="__m256i" varname="b" etype="UI8"/>
+ <description>Multiply the packed 8-bit integers in "a" and "b" in the finite field GF(2^8), and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). The field GF(2^8) is represented in polynomial representation with the reduction polynomial x^8 + x^4 + x^3 + x + 1.</description>
+ <operation>
+DEFINE gf2p8mul_byte(src1byte, src2byte) {
+ tword := 0
+ FOR i := 0 to 7
+ IF src2byte.bit[i]
+ tword := tword XOR (src1byte &lt;&lt; i)
+ FI
+ ENDFOR
+ FOR i := 14 downto 8
+ p := 0x11B &lt;&lt; (i-8)
+ IF tword.bit[i]
+ tword := tword XOR p
+ FI
+ ENDFOR
+ RETURN tword.byte[0]
+}
+FOR j := 0 TO 31
+ IF k[j]
+ dst.byte[j] := gf2p8mul_byte(a.byte[j], b.byte[j])
+ ELSE
+ dst.byte[j] := src.byte[j]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VGF2P8MULB" form="ymm {k}, ymm, ymm" xed="VGF2P8MULB_YMMu8_MASKmskw_YMMu8_YMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_mm256_gf2p8mul_epi8">
+ <type>Integer</type>
+ <CPUID>GFNI</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI8"/>
+ <parameter type="__m256i" varname="a" etype="UI8"/>
+ <parameter type="__m256i" varname="b" etype="UI8"/>
+ <description>Multiply the packed 8-bit integers in "a" and "b" in the finite field GF(2^8), and store the results in "dst". The field GF(2^8) is represented in polynomial representation with the reduction polynomial x^8 + x^4 + x^3 + x + 1.</description>
+ <operation>
+DEFINE gf2p8mul_byte(src1byte, src2byte) {
+ tword := 0
+ FOR i := 0 to 7
+ IF src2byte.bit[i]
+ tword := tword XOR (src1byte &lt;&lt; i)
+ FI
+ ENDFOR
+ FOR i := 14 downto 8
+ p := 0x11B &lt;&lt; (i-8)
+ IF tword.bit[i]
+ tword := tword XOR p
+ FI
+ ENDFOR
+ RETURN tword.byte[0]
+}
+FOR j := 0 TO 31
+ dst.byte[j] := gf2p8mul_byte(a.byte[j], b.byte[j])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VGF2P8MULB" form="ymm, ymm, ymm" xed="VGF2P8MULB_YMMu8_MASKmskw_YMMu8_YMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_mm_maskz_gf2p8mul_epi8">
+ <type>Integer</type>
+ <CPUID>GFNI</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
+ <parameter type="__m128i" varname="b" etype="UI8"/>
+ <description>Multiply the packed 8-bit integers in "a" and "b" in the finite field GF(2^8), and store the results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set). The field GF(2^8) is represented in polynomial representation with the reduction polynomial x^8 + x^4 + x^3 + x + 1.</description>
+ <operation>
+DEFINE gf2p8mul_byte(src1byte, src2byte) {
+ tword := 0
+ FOR i := 0 to 7
+ IF src2byte.bit[i]
+ tword := tword XOR (src1byte &lt;&lt; i)
+ FI
+ ENDFOR
+ FOR i := 14 downto 8
+ p := 0x11B &lt;&lt; (i-8)
+ IF tword.bit[i]
+ tword := tword XOR p
+ FI
+ ENDFOR
+ RETURN tword.byte[0]
+}
+FOR j := 0 TO 15
+ IF k[j]
+ dst.byte[j] := gf2p8mul_byte(a.byte[j], b.byte[j])
+ ELSE
+ dst.byte[j] := 0
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VGF2P8MULB" form="xmm {z}, xmm, xmm" xed="VGF2P8MULB_XMMu8_MASKmskw_XMMu8_XMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_mm_mask_gf2p8mul_epi8">
+ <type>Integer</type>
+ <CPUID>GFNI</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__m128i" varname="src" etype="UI8"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
+ <parameter type="__m128i" varname="b" etype="UI8"/>
+ <description>Multiply the packed 8-bit integers in "a" and "b" in the finite field GF(2^8), and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). The field GF(2^8) is represented in polynomial representation with the reduction polynomial x^8 + x^4 + x^3 + x + 1.</description>
+ <operation>
+DEFINE gf2p8mul_byte(src1byte, src2byte) {
+ tword := 0
+ FOR i := 0 to 7
+ IF src2byte.bit[i]
+ tword := tword XOR (src1byte &lt;&lt; i)
+ FI
+ ENDFOR
+ FOR i := 14 downto 8
+ p := 0x11B &lt;&lt; (i-8)
+ IF tword.bit[i]
+ tword := tword XOR p
+ FI
+ ENDFOR
+ RETURN tword.byte[0]
+}
+FOR j := 0 TO 15
+ IF k[j]
+ dst.byte[j] := gf2p8mul_byte(a.byte[j], b.byte[j])
+ ELSE
+ dst.byte[j] := src.byte[j]
+ FI
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VGF2P8MULB" form="xmm {k}, xmm, xmm" xed="VGF2P8MULB_XMMu8_MASKmskw_XMMu8_XMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_mm_gf2p8mul_epi8">
+ <type>Integer</type>
+ <CPUID>GFNI</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
+ <parameter type="__m128i" varname="b" etype="UI8"/>
+ <description>Multiply the packed 8-bit integers in "a" and "b" in the finite field GF(2^8), and store the results in "dst". The field GF(2^8) is represented in polynomial representation with the reduction polynomial x^8 + x^4 + x^3 + x + 1.</description>
+ <operation>
+DEFINE gf2p8mul_byte(src1byte, src2byte) {
+ tword := 0
+ FOR i := 0 to 7
+ IF src2byte.bit[i]
+ tword := tword XOR (src1byte &lt;&lt; i)
+ FI
+ ENDFOR
+ FOR i := 14 downto 8
+ p := 0x11B &lt;&lt; (i-8)
+ IF tword.bit[i]
+ tword := tword XOR p
+ FI
+ ENDFOR
+ RETURN tword.byte[0]
+}
+FOR j := 0 TO 15
+ dst.byte[j] := gf2p8mul_byte(a.byte[j], b.byte[j])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VGF2P8MULB" form="xmm, xmm, xmm" xed="VGF2P8MULB_XMMu8_MASKmskw_XMMu8_XMMu8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_mm512_maskz_gf2p8affine_epi64_epi8">
+ <type>Integer</type>
+ <CPUID>GFNI</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask64" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="x" etype="UI64"/>
+ <parameter type="__m512i" varname="A" etype="UI64"/>
+ <parameter type="int" varname="b" etype="IMM" immwidth="8"/>
+ <description>Compute an affine transformation in the Galois Field 2^8. An affine transformation is defined by "A" * "x" + "b", where "A" represents an 8 by 8 bit matrix, "x" represents an 8-bit vector, and "b" is a constant immediate byte. Store the packed 8-bit results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE parity(x) {
+ t := 0
+ FOR i := 0 to 7
+ t := t XOR x.bit[i]
+ ENDFOR
+ RETURN t
+}
+DEFINE affine_byte(tsrc2qw, src1byte, imm8) {
+ FOR i := 0 to 7
+ retbyte.bit[i] := parity(tsrc2qw.byte[7-i] AND src1byte) XOR imm8.bit[i]
+ ENDFOR
+ RETURN retbyte
+}
+FOR j := 0 TO 7
+ FOR i := 0 to 7
+ IF k[j*8+i]
+ dst.qword[j].byte[i] := affine_byte(A.qword[j], x.qword[j].byte[i], b)
+ ELSE
+ dst.qword[j].byte[i] := 0
+ FI
+ ENDFOR
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VGF2P8AFFINEQB" form="zmm {z}, zmm, zmm, imm8" xed="VGF2P8AFFINEQB_ZMMu8_MASKmskw_ZMMu8_ZMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_mm512_mask_gf2p8affine_epi64_epi8">
+ <type>Integer</type>
+ <CPUID>GFNI</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="src" etype="UI64"/>
+ <parameter type="__mmask64" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="x" etype="UI64"/>
+ <parameter type="__m512i" varname="A" etype="UI64"/>
+ <parameter type="int" varname="b" etype="IMM" immwidth="8"/>
+ <description>Compute an affine transformation in the Galois Field 2^8. An affine transformation is defined by "A" * "x" + "b", where "A" represents an 8 by 8 bit matrix, "x" represents an 8-bit vector, and "b" is a constant immediate byte. Store the packed 8-bit results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE parity(x) {
+ t := 0
+ FOR i := 0 to 7
+ t := t XOR x.bit[i]
+ ENDFOR
+ RETURN t
+}
+DEFINE affine_byte(tsrc2qw, src1byte, imm8) {
+ FOR i := 0 to 7
+ retbyte.bit[i] := parity(tsrc2qw.byte[7-i] AND src1byte) XOR imm8.bit[i]
+ ENDFOR
+ RETURN retbyte
+}
+FOR j := 0 TO 7
+ FOR i := 0 to 7
+ IF k[j*8+i]
+ dst.qword[j].byte[i] := affine_byte(A.qword[j], x.qword[j].byte[i], b)
+ ELSE
+ dst.qword[j].byte[i] := src.qword[j].byte[i]
+ FI
+ ENDFOR
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VGF2P8AFFINEQB" form="zmm {k}, zmm, zmm, imm8" xed="VGF2P8AFFINEQB_ZMMu8_MASKmskw_ZMMu8_ZMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_mm512_gf2p8affine_epi64_epi8">
+ <type>Integer</type>
+ <CPUID>GFNI</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="x" etype="UI64"/>
+ <parameter type="__m512i" varname="A" etype="UI64"/>
+ <parameter type="int" varname="b" etype="IMM" immwidth="8"/>
+ <description>Compute an affine transformation in the Galois Field 2^8. An affine transformation is defined by "A" * "x" + "b", where "A" represents an 8 by 8 bit matrix, "x" represents an 8-bit vector, and "b" is a constant immediate byte. Store the packed 8-bit results in "dst".</description>
+ <operation>
+DEFINE parity(x) {
+ t := 0
+ FOR i := 0 to 7
+ t := t XOR x.bit[i]
+ ENDFOR
+ RETURN t
+}
+DEFINE affine_byte(tsrc2qw, src1byte, imm8) {
+ FOR i := 0 to 7
+ retbyte.bit[i] := parity(tsrc2qw.byte[7-i] AND src1byte) XOR imm8.bit[i]
+ ENDFOR
+ RETURN retbyte
+}
+FOR j := 0 TO 7
+ FOR i := 0 to 7
+ dst.qword[j].byte[i] := affine_byte(A.qword[j], x.qword[j].byte[i], b)
+ ENDFOR
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VGF2P8AFFINEQB" form="zmm, zmm, zmm, imm8" xed="VGF2P8AFFINEQB_ZMMu8_MASKmskw_ZMMu8_ZMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_mm256_maskz_gf2p8affine_epi64_epi8">
+ <type>Integer</type>
+ <CPUID>GFNI</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="x" etype="UI64"/>
+ <parameter type="__m256i" varname="A" etype="UI64"/>
+ <parameter type="int" varname="b" etype="IMM" immwidth="8"/>
+ <description>Compute an affine transformation in the Galois Field 2^8. An affine transformation is defined by "A" * "x" + "b", where "A" represents an 8 by 8 bit matrix, "x" represents an 8-bit vector, and "b" is a constant immediate byte. Store the packed 8-bit results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE parity(x) {
+ t := 0
+ FOR i := 0 to 7
+ t := t XOR x.bit[i]
+ ENDFOR
+ RETURN t
+}
+DEFINE affine_byte(tsrc2qw, src1byte, imm8) {
+ FOR i := 0 to 7
+ retbyte.bit[i] := parity(tsrc2qw.byte[7-i] AND src1byte) XOR imm8.bit[i]
+ ENDFOR
+ RETURN retbyte
+}
+FOR j := 0 TO 3
+ FOR i := 0 to 7
+ IF k[j*8+i]
+ dst.qword[j].byte[i] := affine_byte(A.qword[j], x.qword[j].byte[i], b)
+ ELSE
+ dst.qword[j].byte[i] := 0
+ FI
+ ENDFOR
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VGF2P8AFFINEQB" form="ymm {z}, ymm, ymm, imm8" xed="VGF2P8AFFINEQB_YMMu8_MASKmskw_YMMu8_YMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_mm256_mask_gf2p8affine_epi64_epi8">
+ <type>Integer</type>
+ <CPUID>GFNI</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m256i" varname="src" etype="UI64"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="x" etype="UI64"/>
+ <parameter type="__m256i" varname="A" etype="UI64"/>
+ <parameter type="int" varname="b" etype="IMM" immwidth="8"/>
+ <description>Compute an affine transformation in the Galois Field 2^8. An affine transformation is defined by "A" * "x" + "b", where "A" represents an 8 by 8 bit matrix, "x" represents an 8-bit vector, and "b" is a constant immediate byte. Store the packed 8-bit results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE parity(x) {
+ t := 0
+ FOR i := 0 to 7
+ t := t XOR x.bit[i]
+ ENDFOR
+ RETURN t
+}
+DEFINE affine_byte(tsrc2qw, src1byte, imm8) {
+ FOR i := 0 to 7
+ retbyte.bit[i] := parity(tsrc2qw.byte[7-i] AND src1byte) XOR imm8.bit[i]
+ ENDFOR
+ RETURN retbyte
+}
+FOR j := 0 TO 3
+ FOR i := 0 to 7
+ IF k[j*8+i]
+ dst.qword[j].byte[i] := affine_byte(A.qword[j], x.qword[j].byte[i], b)
+ ELSE
+ dst.qword[j].byte[i] := src.qword[j].byte[i]
+ FI
+ ENDFOR
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VGF2P8AFFINEQB" form="ymm {k}, ymm, ymm, imm8" xed="VGF2P8AFFINEQB_YMMu8_MASKmskw_YMMu8_YMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_mm256_gf2p8affine_epi64_epi8">
+ <type>Integer</type>
+ <CPUID>GFNI</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m256i" varname="x" etype="UI64"/>
+ <parameter type="__m256i" varname="A" etype="UI64"/>
+ <parameter type="int" varname="b" etype="IMM" immwidth="8"/>
+ <description>Compute an affine transformation in the Galois Field 2^8. An affine transformation is defined by "A" * "x" + "b", where "A" represents an 8 by 8 bit matrix, "x" represents an 8-bit vector, and "b" is a constant immediate byte. Store the packed 8-bit results in "dst".</description>
+ <operation>
+DEFINE parity(x) {
+ t := 0
+ FOR i := 0 to 7
+ t := t XOR x.bit[i]
+ ENDFOR
+ RETURN t
+}
+DEFINE affine_byte(tsrc2qw, src1byte, imm8) {
+ FOR i := 0 to 7
+ retbyte.bit[i] := parity(tsrc2qw.byte[7-i] AND src1byte) XOR imm8.bit[i]
+ ENDFOR
+ RETURN retbyte
+}
+FOR j := 0 TO 3
+ FOR i := 0 to 7
+ dst.qword[j].byte[i] := affine_byte(A.qword[j], x.qword[j].byte[i], b)
+ ENDFOR
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VGF2P8AFFINEQB" form="ymm, ymm, ymm, imm8" xed="VGF2P8AFFINEQB_YMMu8_MASKmskw_YMMu8_YMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_mm_maskz_gf2p8affine_epi64_epi8">
+ <type>Integer</type>
+ <CPUID>GFNI</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="x" etype="UI64"/>
+ <parameter type="__m128i" varname="A" etype="UI64"/>
+ <parameter type="int" varname="b" etype="IMM" immwidth="8"/>
+ <description>Compute an affine transformation in the Galois Field 2^8. An affine transformation is defined by "A" * "x" + "b", where "A" represents an 8 by 8 bit matrix, "x" represents an 8-bit vector, and "b" is a constant immediate byte. Store the packed 8-bit results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE parity(x) {
+ t := 0
+ FOR i := 0 to 7
+ t := t XOR x.bit[i]
+ ENDFOR
+ RETURN t
+}
+DEFINE affine_byte(tsrc2qw, src1byte, imm8) {
+ FOR i := 0 to 7
+ retbyte.bit[i] := parity(tsrc2qw.byte[7-i] AND src1byte) XOR imm8.bit[i]
+ ENDFOR
+ RETURN retbyte
+}
+FOR j := 0 TO 1
+ FOR i := 0 to 7
+ IF k[j*8+i]
+ dst.qword[j].byte[i] := affine_byte(A.qword[j], x.qword[j].byte[i], b)
+ ELSE
+ dst.qword[j].byte[i] := 0
+ FI
+ ENDFOR
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VGF2P8AFFINEQB" form="xmm {z}, xmm, xmm, imm8" xed="VGF2P8AFFINEQB_XMMu8_MASKmskw_XMMu8_XMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_mm_mask_gf2p8affine_epi64_epi8">
+ <type>Integer</type>
+ <CPUID>GFNI</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m128i" varname="src" etype="UI64"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="x" etype="UI64"/>
+ <parameter type="__m128i" varname="A" etype="UI64"/>
+ <parameter type="int" varname="b" etype="IMM" immwidth="8"/>
+ <description>Compute an affine transformation in the Galois Field 2^8. An affine transformation is defined by "A" * "x" + "b", where "A" represents an 8 by 8 bit matrix, "x" represents an 8-bit vector, and "b" is a constant immediate byte. Store the packed 8-bit results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE parity(x) {
+ t := 0
+ FOR i := 0 to 7
+ t := t XOR x.bit[i]
+ ENDFOR
+ RETURN t
+}
+DEFINE affine_byte(tsrc2qw, src1byte, imm8) {
+ FOR i := 0 to 7
+ retbyte.bit[i] := parity(tsrc2qw.byte[7-i] AND src1byte) XOR imm8.bit[i]
+ ENDFOR
+ RETURN retbyte
+}
+FOR j := 0 TO 1
+ FOR i := 0 to 7
+ IF k[j*8+i]
+ dst.qword[j].byte[i] := affine_byte(A.qword[j], x.qword[j].byte[i], b)
+ ELSE
+ dst.qword[j].byte[i] := src.qword[j].byte[i]
+ FI
+ ENDFOR
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VGF2P8AFFINEQB" form="xmm {k}, xmm, xmm, imm8" xed="VGF2P8AFFINEQB_XMMu8_MASKmskw_XMMu8_XMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_mm_gf2p8affine_epi64_epi8">
+ <type>Integer</type>
+ <CPUID>GFNI</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m128i" varname="x" etype="UI64"/>
+ <parameter type="__m128i" varname="A" etype="UI64"/>
+ <parameter type="int" varname="b" etype="IMM" immwidth="8"/>
+ <description>Compute an affine transformation in the Galois Field 2^8. An affine transformation is defined by "A" * "x" + "b", where "A" represents an 8 by 8 bit matrix, "x" represents an 8-bit vector, and "b" is a constant immediate byte. Store the packed 8-bit results in "dst".</description>
+ <operation>
+DEFINE parity(x) {
+ t := 0
+ FOR i := 0 to 7
+ t := t XOR x.bit[i]
+ ENDFOR
+ RETURN t
+}
+DEFINE affine_byte(tsrc2qw, src1byte, imm8) {
+ FOR i := 0 to 7
+ retbyte.bit[i] := parity(tsrc2qw.byte[7-i] AND src1byte) XOR imm8.bit[i]
+ ENDFOR
+ RETURN retbyte
+}
+FOR j := 0 TO 1
+ FOR i := 0 to 7
+ dst.qword[j].byte[i] := affine_byte(A.qword[j], x.qword[j].byte[i], b)
+ ENDFOR
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VGF2P8AFFINEQB" form="xmm, xmm, xmm, imm8" xed="VGF2P8AFFINEQB_XMMu8_MASKmskw_XMMu8_XMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_mm512_maskz_gf2p8affineinv_epi64_epi8">
+ <type>Integer</type>
+ <CPUID>GFNI</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask64" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="x" etype="UI64"/>
+ <parameter type="__m512i" varname="A" etype="UI64"/>
+ <parameter type="int" varname="b" etype="IMM" immwidth="8"/>
+ <description>Compute an inverse affine transformation in the Galois Field 2^8. An affine transformation is defined by "A" * "x" + "b", where "A" represents an 8 by 8 bit matrix, "x" represents an 8-bit vector, and "b" is a constant immediate byte. The inverse of the 8-bit values in "x" is defined with respect to the reduction polynomial x^8 + x^4 + x^3 + x + 1. Store the packed 8-bit results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>DEFINE parity(x) {
+ t := 0
+ FOR i := 0 to 7
+ t := t XOR x.bit[i]
+ ENDFOR
+ RETURN t
+}
+DEFINE affine_inverse_byte(tsrc2qw, src1byte, imm8) {
+ FOR i := 0 to 7
+ retbyte.bit[i] := parity(tsrc2qw.byte[7-i] AND inverse(src1byte)) XOR imm8.bit[i]
+ ENDFOR
+ RETURN retbyte
+}
+FOR j := 0 TO 7
+ FOR i := 0 to 7
+ IF k[j*8+i]
+ dst.qword[j].byte[i] := affine_inverse_byte(A.qword[j], x.qword[j].byte[i], b)
+ ELSE
+ dst.qword[j].byte[i] := 0
+ FI
+ ENDFOR
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VGF2P8AFFINEINVQB" form="zmm {z}, zmm, zmm, imm8" xed="VGF2P8AFFINEINVQB_ZMMu8_MASKmskw_ZMMu8_ZMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_mm512_mask_gf2p8affineinv_epi64_epi8">
+ <type>Integer</type>
+ <CPUID>GFNI</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="src" etype="UI64"/>
+ <parameter type="__mmask64" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="x" etype="UI64"/>
+ <parameter type="__m512i" varname="A" etype="UI64"/>
+ <parameter type="int" varname="b" etype="IMM" immwidth="8"/>
+ <description>Compute an inverse affine transformation in the Galois Field 2^8. An affine transformation is defined by "A" * "x" + "b", where "A" represents an 8 by 8 bit matrix, "x" represents an 8-bit vector, and "b" is a constant immediate byte. The inverse of the 8-bit values in "x" is defined with respect to the reduction polynomial x^8 + x^4 + x^3 + x + 1. Store the packed 8-bit results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>DEFINE parity(x) {
+ t := 0
+ FOR i := 0 to 7
+ t := t XOR x.bit[i]
+ ENDFOR
+ RETURN t
+}
+DEFINE affine_inverse_byte(tsrc2qw, src1byte, imm8) {
+ FOR i := 0 to 7
+ retbyte.bit[i] := parity(tsrc2qw.byte[7-i] AND inverse(src1byte)) XOR imm8.bit[i]
+ ENDFOR
+ RETURN retbyte
+}
+FOR j := 0 TO 7
+ FOR i := 0 to 7
+ IF k[j*8+i]
+ dst.qword[j].byte[i] := affine_inverse_byte(A.qword[j], x.qword[j].byte[i], b)
+ ELSE
+      dst.qword[j].byte[i] := src.qword[j].byte[i]
+ FI
+ ENDFOR
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VGF2P8AFFINEINVQB" form="zmm {k}, zmm, zmm, imm8" xed="VGF2P8AFFINEINVQB_ZMMu8_MASKmskw_ZMMu8_ZMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_mm512_gf2p8affineinv_epi64_epi8">
+ <type>Integer</type>
+ <CPUID>GFNI</CPUID>
+ <CPUID>AVX512F</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="x" etype="UI64"/>
+ <parameter type="__m512i" varname="A" etype="UI64"/>
+ <parameter type="int" varname="b" etype="IMM" immwidth="8"/>
+ <description>Compute an inverse affine transformation in the Galois Field 2^8. An affine transformation is defined by "A" * "x" + "b", where "A" represents an 8 by 8 bit matrix, "x" represents an 8-bit vector, and "b" is a constant immediate byte. The inverse of the 8-bit values in "x" is defined with respect to the reduction polynomial x^8 + x^4 + x^3 + x + 1. Store the packed 8-bit results in "dst".</description>
+ <operation>DEFINE parity(x) {
+ t := 0
+ FOR i := 0 to 7
+ t := t XOR x.bit[i]
+ ENDFOR
+ RETURN t
+}
+DEFINE affine_inverse_byte(tsrc2qw, src1byte, imm8) {
+ FOR i := 0 to 7
+ retbyte.bit[i] := parity(tsrc2qw.byte[7-i] AND inverse(src1byte)) XOR imm8.bit[i]
+ ENDFOR
+ RETURN retbyte
+}
+FOR j := 0 TO 7
+ FOR i := 0 to 7
+ dst.qword[j].byte[i] := affine_inverse_byte(A.qword[j], x.qword[j].byte[i], b)
+ ENDFOR
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VGF2P8AFFINEINVQB" form="zmm, zmm, zmm, imm8" xed="VGF2P8AFFINEINVQB_ZMMu8_MASKmskw_ZMMu8_ZMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_mm256_maskz_gf2p8affineinv_epi64_epi8">
+ <type>Integer</type>
+ <CPUID>GFNI</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="x" etype="UI64"/>
+ <parameter type="__m256i" varname="A" etype="UI64"/>
+ <parameter type="int" varname="b" etype="IMM" immwidth="8"/>
+ <description>Compute an inverse affine transformation in the Galois Field 2^8. An affine transformation is defined by "A" * "x" + "b", where "A" represents an 8 by 8 bit matrix, "x" represents an 8-bit vector, and "b" is a constant immediate byte. The inverse of the 8-bit values in "x" is defined with respect to the reduction polynomial x^8 + x^4 + x^3 + x + 1. Store the packed 8-bit results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>DEFINE parity(x) {
+ t := 0
+ FOR i := 0 to 7
+ t := t XOR x.bit[i]
+ ENDFOR
+ RETURN t
+}
+DEFINE affine_inverse_byte(tsrc2qw, src1byte, imm8) {
+ FOR i := 0 to 7
+ retbyte.bit[i] := parity(tsrc2qw.byte[7-i] AND inverse(src1byte)) XOR imm8.bit[i]
+ ENDFOR
+ RETURN retbyte
+}
+FOR j := 0 TO 3
+ FOR i := 0 to 7
+ IF k[j*8+i]
+ dst.qword[j].byte[i] := affine_inverse_byte(A.qword[j], x.qword[j].byte[i], b)
+ ELSE
+ dst.qword[j].byte[i] := 0
+ FI
+ ENDFOR
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VGF2P8AFFINEINVQB" form="ymm {z}, ymm, ymm, imm8" xed="VGF2P8AFFINEINVQB_YMMu8_MASKmskw_YMMu8_YMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_mm256_mask_gf2p8affineinv_epi64_epi8">
+ <type>Integer</type>
+ <CPUID>GFNI</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m256i" varname="src" etype="UI64"/>
+ <parameter type="__mmask32" varname="k" etype="MASK"/>
+ <parameter type="__m256i" varname="x" etype="UI64"/>
+ <parameter type="__m256i" varname="A" etype="UI64"/>
+ <parameter type="int" varname="b" etype="IMM" immwidth="8"/>
+ <description>Compute an inverse affine transformation in the Galois Field 2^8. An affine transformation is defined by "A" * "x" + "b", where "A" represents an 8 by 8 bit matrix, "x" represents an 8-bit vector, and "b" is a constant immediate byte. The inverse of the 8-bit values in "x" is defined with respect to the reduction polynomial x^8 + x^4 + x^3 + x + 1. Store the packed 8-bit results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>DEFINE parity(x) {
+ t := 0
+ FOR i := 0 to 7
+ t := t XOR x.bit[i]
+ ENDFOR
+ RETURN t
+}
+DEFINE affine_inverse_byte(tsrc2qw, src1byte, imm8) {
+ FOR i := 0 to 7
+ retbyte.bit[i] := parity(tsrc2qw.byte[7-i] AND inverse(src1byte)) XOR imm8.bit[i]
+ ENDFOR
+ RETURN retbyte
+}
+FOR j := 0 TO 3
+ FOR i := 0 to 7
+ IF k[j*8+i]
+ dst.qword[j].byte[i] := affine_inverse_byte(A.qword[j], x.qword[j].byte[i], b)
+ ELSE
+ dst.qword[j].byte[i] := src.qword[j].byte[i]
+ FI
+ ENDFOR
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VGF2P8AFFINEINVQB" form="ymm {k}, ymm, ymm, imm8" xed="VGF2P8AFFINEINVQB_YMMu8_MASKmskw_YMMu8_YMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_mm256_gf2p8affineinv_epi64_epi8">
+ <type>Integer</type>
+ <CPUID>GFNI</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m256i" varname="dst" etype="UI64"/>
+ <parameter type="__m256i" varname="x" etype="UI64"/>
+ <parameter type="__m256i" varname="A" etype="UI64"/>
+ <parameter type="int" varname="b" etype="IMM" immwidth="8"/>
+ <description>Compute an inverse affine transformation in the Galois Field 2^8. An affine transformation is defined by "A" * "x" + "b", where "A" represents an 8 by 8 bit matrix, "x" represents an 8-bit vector, and "b" is a constant immediate byte. The inverse of the 8-bit values in "x" is defined with respect to the reduction polynomial x^8 + x^4 + x^3 + x + 1. Store the packed 8-bit results in "dst".</description>
+ <operation>DEFINE parity(x) {
+ t := 0
+ FOR i := 0 to 7
+ t := t XOR x.bit[i]
+ ENDFOR
+ RETURN t
+}
+DEFINE affine_inverse_byte(tsrc2qw, src1byte, imm8) {
+ FOR i := 0 to 7
+ retbyte.bit[i] := parity(tsrc2qw.byte[7-i] AND inverse(src1byte)) XOR imm8.bit[i]
+ ENDFOR
+ RETURN retbyte
+}
+FOR j := 0 TO 3
+ FOR i := 0 to 7
+ dst.qword[j].byte[i] := affine_inverse_byte(A.qword[j], x.qword[j].byte[i], b)
+ ENDFOR
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VGF2P8AFFINEINVQB" form="ymm, ymm, ymm, imm8" xed="VGF2P8AFFINEINVQB_YMMu8_MASKmskw_YMMu8_YMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_mm_maskz_gf2p8affineinv_epi64_epi8">
+ <type>Integer</type>
+ <CPUID>GFNI</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="x" etype="UI64"/>
+ <parameter type="__m128i" varname="A" etype="UI64"/>
+ <parameter type="int" varname="b" etype="IMM" immwidth="8"/>
+ <description>Compute an inverse affine transformation in the Galois Field 2^8. An affine transformation is defined by "A" * "x" + "b", where "A" represents an 8 by 8 bit matrix, "x" represents an 8-bit vector, and "b" is a constant immediate byte. The inverse of the 8-bit values in "x" is defined with respect to the reduction polynomial x^8 + x^4 + x^3 + x + 1. Store the packed 8-bit results in "dst" using zeromask "k" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>DEFINE parity(x) {
+ t := 0
+ FOR i := 0 to 7
+ t := t XOR x.bit[i]
+ ENDFOR
+ RETURN t
+}
+DEFINE affine_inverse_byte(tsrc2qw, src1byte, imm8) {
+ FOR i := 0 to 7
+ retbyte.bit[i] := parity(tsrc2qw.byte[7-i] AND inverse(src1byte)) XOR imm8.bit[i]
+ ENDFOR
+ RETURN retbyte
+}
+FOR j := 0 TO 1
+ FOR i := 0 to 7
+ IF k[j*8+i]
+ dst.qword[j].byte[i] := affine_inverse_byte(A.qword[j], x.qword[j].byte[i], b)
+ ELSE
+ dst.qword[j].byte[i] := 0
+ FI
+ ENDFOR
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VGF2P8AFFINEINVQB" form="xmm {z}, xmm, xmm, imm8" xed="VGF2P8AFFINEINVQB_XMMu8_MASKmskw_XMMu8_XMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_mm_mask_gf2p8affineinv_epi64_epi8">
+ <type>Integer</type>
+ <CPUID>GFNI</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m128i" varname="src" etype="UI64"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m128i" varname="x" etype="UI64"/>
+ <parameter type="__m128i" varname="A" etype="UI64"/>
+ <parameter type="int" varname="b" etype="IMM" immwidth="8"/>
+ <description>Compute an inverse affine transformation in the Galois Field 2^8. An affine transformation is defined by "A" * "x" + "b", where "A" represents an 8 by 8 bit matrix, "x" represents an 8-bit vector, and "b" is a constant immediate byte. The inverse of the 8-bit values in "x" is defined with respect to the reduction polynomial x^8 + x^4 + x^3 + x + 1. Store the packed 8-bit results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>DEFINE parity(x) {
+ t := 0
+ FOR i := 0 to 7
+ t := t XOR x.bit[i]
+ ENDFOR
+ RETURN t
+}
+DEFINE affine_inverse_byte(tsrc2qw, src1byte, imm8) {
+ FOR i := 0 to 7
+ retbyte.bit[i] := parity(tsrc2qw.byte[7-i] AND inverse(src1byte)) XOR imm8.bit[i]
+ ENDFOR
+ RETURN retbyte
+}
+FOR j := 0 TO 1
+ FOR i := 0 to 7
+ IF k[j*8+i]
+ dst.qword[j].byte[i] := affine_inverse_byte(A.qword[j], x.qword[j].byte[i], b)
+ ELSE
+ dst.qword[j].byte[i] := src.qword[j].byte[i]
+ FI
+ ENDFOR
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VGF2P8AFFINEINVQB" form="xmm {k}, xmm, xmm, imm8" xed="VGF2P8AFFINEINVQB_XMMu8_MASKmskw_XMMu8_XMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_mm_gf2p8affineinv_epi64_epi8">
+ <type>Integer</type>
+ <CPUID>GFNI</CPUID>
+ <CPUID>AVX512VL</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m128i" varname="x" etype="UI64"/>
+ <parameter type="__m128i" varname="A" etype="UI64"/>
+ <parameter type="int" varname="b" etype="IMM" immwidth="8"/>
+ <description>Compute an inverse affine transformation in the Galois Field 2^8. An affine transformation is defined by "A" * "x" + "b", where "A" represents an 8 by 8 bit matrix, "x" represents an 8-bit vector, and "b" is a constant immediate byte. The inverse of the 8-bit values in "x" is defined with respect to the reduction polynomial x^8 + x^4 + x^3 + x + 1. Store the packed 8-bit results in "dst".</description>
+ <operation>DEFINE parity(x) {
+ t := 0
+ FOR i := 0 to 7
+ t := t XOR x.bit[i]
+ ENDFOR
+ RETURN t
+}
+DEFINE affine_inverse_byte(tsrc2qw, src1byte, imm8) {
+ FOR i := 0 to 7
+ retbyte.bit[i] := parity(tsrc2qw.byte[7-i] AND inverse(src1byte)) XOR imm8.bit[i]
+ ENDFOR
+ RETURN retbyte
+}
+FOR j := 0 TO 1
+ FOR i := 0 to 7
+ dst.qword[j].byte[i] := affine_inverse_byte(A.qword[j], x.qword[j].byte[i], b)
+ ENDFOR
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="VGF2P8AFFINEINVQB" form="xmm, xmm, xmm, imm8" xed="VGF2P8AFFINEINVQB_XMMu8_MASKmskw_XMMu8_XMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_invpcid">
+ <CPUID>INVPCID</CPUID>
+ <category>OS-Targeted</category>
+ <return type="void"/>
+ <parameter type="unsigned int" varname="type" etype="UI32"/>
+ <parameter type="void*" varname="descriptor" memwidth="128"/>
+ <description>Invalidate mappings in the Translation Lookaside Buffers (TLBs) and paging-structure caches for the processor context identifier (PCID) specified by "descriptor" based on the invalidation type specified in "type".
+ The PCID "descriptor" is specified as a 16-byte memory operand (with no alignment restrictions) where bits [11:0] specify the PCID, and bits [127:64] specify the linear address; bits [63:12] are reserved.
+ The types supported are:
+ 0) Individual-address invalidation: If "type" is 0, the logical processor invalidates mappings for a single linear address and tagged with the PCID specified in "descriptor", except global translations. The instruction may also invalidate global translations, mappings for other linear addresses, or mappings tagged with other PCIDs.
+ 1) Single-context invalidation: If "type" is 1, the logical processor invalidates all mappings tagged with the PCID specified in "descriptor" except global translations. In some cases, it may invalidate mappings for other PCIDs as well.
+ 2) All-context invalidation: If "type" is 2, the logical processor invalidates all mappings tagged with any PCID.
+ 3) All-context invalidation, retaining global translations: If "type" is 3, the logical processor invalidates all mappings tagged with any PCID except global translations, ignoring "descriptor". The instruction may also invalidate global translations as well.</description>
+ <operation>
+CASE type[1:0] OF
+0: // individual-address invalidation retaining global translations
+ OP_PCID := MEM[descriptor+11:descriptor]
+ ADDR := MEM[descriptor+127:descriptor+64]
+ BREAK
+1: // single PCID invalidation retaining globals
+ OP_PCID := MEM[descriptor+11:descriptor]
+ // invalidate all mappings tagged with OP_PCID except global translations
+ BREAK
+2: // all PCID invalidation
+ // invalidate all mappings tagged with any PCID
+ BREAK
+3: // all PCID invalidation retaining global translations
+ // invalidate all mappings tagged with any PCID except global translations
+ BREAK
+ESAC
+ </operation>
+ <instruction name="INVPCID" form="r32, m128" xed="INVPCID_GPR32_MEMdq"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm_prefetch">
+ <CPUID>KNCNI</CPUID>
+ <category>General Support</category>
+ <return type="void"/>
+ <parameter type="char const*" varname="p" etype="UI8"/>
+ <parameter type="int" varname="i" etype="IMM" immwidth="2"/>
+ <description>Fetch the line of data from memory that contains address "p" to a location in the cache hierarchy specified by the locality hint "i".</description>
+ <instruction name="VPREFETCH0" form="m8"/>
+ <instruction name="VPREFETCH1" form="m8"/>
+ <instruction name="VPREFETCH2" form="m8"/>
+ <instruction name="VPREFETCHNTA" form="m8"/>
+ <instruction name="VPREFETCHE0" form="m8"/>
+ <instruction name="VPREFETCHE1" form="m8"/>
+ <instruction name="VPREFETCHE2" form="m8"/>
+ <instruction name="VPREFETCHENTA" form="m8"/>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_kandn">
+ <type>Mask</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Mask</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__mmask16" varname="a" etype="MASK"/>
+ <parameter type="__mmask16" varname="b" etype="MASK"/>
+ <description>Compute the bitwise NOT of 16-bit masks "a" and then AND with "b", and store the result in "k".</description>
+ <operation>
+k[15:0] := (NOT a[15:0]) AND b[15:0]
+k[MAX:16] := 0
+ </operation>
+ <instruction name="KANDN" form="k, k"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_kand">
+ <type>Mask</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Mask</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__mmask16" varname="a" etype="MASK"/>
+ <parameter type="__mmask16" varname="b" etype="MASK"/>
+ <description>Compute the bitwise AND of 16-bit masks "a" and "b", and store the result in "k".</description>
+ <operation>
+k[15:0] := a[15:0] AND b[15:0]
+k[MAX:16] := 0
+ </operation>
+ <instruction name="KAND" form="k, k"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_kmov">
+ <type>Mask</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Mask</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__mmask16" varname="a" etype="MASK"/>
+ <description>Copy 16-bit mask "a" to "k".</description>
+ <operation>
+k[15:0] := a[15:0]
+k[MAX:16] := 0
+ </operation>
+ <instruction name="KMOV" form="k, k"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_knot">
+ <type>Mask</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Mask</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__mmask16" varname="a" etype="MASK"/>
+ <description>Compute the bitwise NOT of 16-bit mask "a", and store the result in "k".</description>
+ <operation>
+k[15:0] := NOT a[15:0]
+k[MAX:16] := 0
+ </operation>
+ <instruction name="KNOT" form="k, k"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_kor">
+ <type>Mask</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Mask</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__mmask16" varname="a" etype="MASK"/>
+ <parameter type="__mmask16" varname="b" etype="MASK"/>
+ <description>Compute the bitwise OR of 16-bit masks "a" and "b", and store the result in "k".</description>
+ <operation>
+k[15:0] := a[15:0] OR b[15:0]
+k[MAX:16] := 0
+ </operation>
+ <instruction name="KOR" form="k, k"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_kxnor">
+ <type>Mask</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Mask</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__mmask16" varname="a" etype="MASK"/>
+ <parameter type="__mmask16" varname="b" etype="MASK"/>
+ <description>Compute the bitwise XNOR of 16-bit masks "a" and "b", and store the result in "k".</description>
+ <operation>
+k[15:0] := NOT (a[15:0] XOR b[15:0])
+k[MAX:16] := 0
+ </operation>
+ <instruction name="KXNOR" form="k, k"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_kxor">
+ <type>Mask</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Mask</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__mmask16" varname="a" etype="MASK"/>
+ <parameter type="__mmask16" varname="b" etype="MASK"/>
+ <description>Compute the bitwise XOR of 16-bit masks "a" and "b", and store the result in "k".</description>
+ <operation>
+k[15:0] := a[15:0] XOR b[15:0]
+k[MAX:16] := 0
+ </operation>
+ <instruction name="KXOR" form="k, k"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_cmplt_epi32_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Compare</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI32"/>
+ <parameter type="__m512i" varname="b" etype="SI32"/>
+ <description>Compare packed signed 32-bit integers in "a" and "b" for less-than, and store the results in mask vector "k".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ k[j] := ( a[i+31:i] &lt; b[i+31:i] ) ? 1 : 0
+ENDFOR
+k[MAX:16] := 0
+ </operation>
+ <instruction name="VPCMPLTD" form="k, zmm, zmm"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_mask_cmplt_epi32_mask">
+ <type>Integer</type>
+ <type>Mask</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Compare</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__mmask16" varname="k1" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="SI32"/>
+ <parameter type="__m512i" varname="b" etype="SI32"/>
+ <description>Compare packed signed 32-bit integers in "a" and "b" for less-than, and store the results in mask vector "k" using zeromask "k1" (elements are zeroed out when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k1[j]
+ k[j] := ( a[i+31:i] &lt; b[i+31:i] ) ? 1 : 0
+ ELSE
+ k[j] := 0
+ FI
+ENDFOR
+k[MAX:16] := 0
+ </operation>
+ <instruction name="VPCMPLTD" form="k {k}, zmm, zmm"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_extload_ps">
+ <type>Floating Point</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Load</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="void const *" varname="mt" etype="FP32"/>
+ <parameter type="_MM_UPCONV_PS_ENUM" varname="conv" etype="IMM" immtype="_MM_UPCONV_PS"/>
+ <parameter type="_MM_BROADCAST32_ENUM" varname="bc" etype="UI32"/>
+ <parameter type="int" varname="hint" etype="UI32" hint="TRUE"/>
+ <description>Depending on "bc", loads 1, 4, or 16 elements of type and size determined by "conv" from memory address "mt" and converts all elements to single-precision (32-bit) floating-point elements, storing the results in "dst". "hint" indicates to the processor whether the data is non-temporal.</description>
+ <operation>addr := MEM[mt]
+FOR j := 0 to 15
+ i := j*32
+ CASE bc OF
+ _MM_BROADCAST32_NONE:
+ CASE conv OF
+ _MM_UPCONV_PS_NONE:
+ n := j*32
+ dst[i+31:i] := addr[n+31:n]
+ _MM_UPCONV_PS_FLOAT16:
+ n := j*16
+ dst[i+31:i] := Convert_FP16_To_FP32(addr[n+15:n])
+ _MM_UPCONV_PS_UINT8:
+ n := j*8
+ dst[i+31:i] := Convert_UInt8_To_FP32(addr[n+7:n])
+ _MM_UPCONV_PS_SINT8:
+ n := j*8
+ dst[i+31:i] := Convert_Int8_To_FP32(addr[n+7:n])
+ _MM_UPCONV_PS_UINT16:
+ n := j*16
+ dst[i+31:i] := Convert_UInt16_To_FP32(addr[n+15:n])
+ _MM_UPCONV_PS_SINT16:
+ n := j*16
+ dst[i+31:i] := Convert_Int16_To_FP32(addr[n+15:n])
+ ESAC
+ _MM_BROADCAST_1X16:
+ CASE conv OF
+ _MM_UPCONV_PS_NONE:
+ n := j*32
+ dst[i+31:i] := addr[31:0]
+ _MM_UPCONV_PS_FLOAT16:
+ n := j*16
+ dst[i+31:i] := Convert_FP16_To_FP32(addr[15:0])
+ _MM_UPCONV_PS_UINT8:
+ n := j*8
+ dst[i+31:i] := Convert_UInt8_To_FP32(addr[7:0])
+ _MM_UPCONV_PS_SINT8:
+ n := j*8
+ dst[i+31:i] := Convert_Int8_To_FP32(addr[7:0])
+ _MM_UPCONV_PS_UINT16:
+ n := j*16
+ dst[i+31:i] := Convert_UInt16_To_FP32(addr[15:0])
+ _MM_UPCONV_PS_SINT16:
+ n := j*16
+ dst[i+31:i] := Convert_Int16_To_FP32(addr[15:0])
+ ESAC
+ _MM_BROADCAST_4X16:
+ mod := j%4
+ CASE conv OF
+ _MM_UPCONV_PS_NONE:
+ n := mod*32
+ dst[i+31:i] := addr[n+31:n]
+ _MM_UPCONV_PS_FLOAT16:
+ n := mod*16
+ dst[i+31:i] := Convert_FP16_To_FP32(addr[n+15:n])
+ _MM_UPCONV_PS_UINT8:
+ n := mod*8
+ dst[i+31:i] := Convert_UInt8_To_FP32(addr[n+7:n])
+ _MM_UPCONV_PS_SINT8:
+ n := mod*8
+ dst[i+31:i] := Convert_Int8_To_FP32(addr[n+7:n])
+ _MM_UPCONV_PS_UINT16:
+ n := mod*16
+ dst[i+31:i] := Convert_UInt16_To_FP32(addr[n+15:n])
+ _MM_UPCONV_PS_SINT16:
+ n := mod*16
+ dst[i+31:i] := Convert_Int16_To_FP32(addr[n+15:n])
+ ESAC
+ ESAC
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VMOVAPS" form="zmm, m512" xed="VMOVAPS_ZMMf32_MASKmskw_MEMf32_AVX512"/>
+ <instruction name="VBROADCASTF32X4" form="zmm, m512" xed="VBROADCASTF32X4_ZMMf32_MASKmskw_MEMf32_AVX512"/>
+ <instruction name="VBROADCASTSS" form="zmm, m512" xed="VBROADCASTSS_ZMMf32_MASKmskw_MEMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_mask_extload_ps">
+ <type>Floating Point</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Load</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="void const *" varname="mt" etype="FP32"/>
+ <parameter type="_MM_UPCONV_PS_ENUM" varname="conv" etype="IMM" immtype="_MM_UPCONV_PS"/>
+ <parameter type="_MM_BROADCAST32_ENUM" varname="bc" etype="UI32"/>
+ <parameter type="int" varname="hint" etype="UI32" hint="TRUE"/>
+ <description>Depending on "bc", loads 1, 4, or 16 elements of type and size determined by "conv" from memory address "mt" and converts all elements to single-precision (32-bit) floating-point elements, storing the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). "hint" indicates to the processor whether the data is non-temporal.</description>
+ <operation>addr := MEM[mt]
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ CASE bc OF
+ _MM_BROADCAST32_NONE:
+ CASE conv OF
+ _MM_UPCONV_PS_NONE:
+ n := j*32
+ dst[i+31:i] := addr[n+31:n]
+ _MM_UPCONV_PS_FLOAT16:
+ n := j*16
+ dst[i+31:i] := Convert_FP16_To_FP32(addr[n+15:n])
+ _MM_UPCONV_PS_UINT8:
+ n := j*8
+ dst[i+31:i] := Convert_UInt8_To_FP32(addr[n+7:n])
+ _MM_UPCONV_PS_SINT8:
+ n := j*8
+ dst[i+31:i] := Convert_Int8_To_FP32(addr[n+7:n])
+ _MM_UPCONV_PS_UINT16:
+ n := j*16
+ dst[i+31:i] := Convert_UInt16_To_FP32(addr[n+15:n])
+ _MM_UPCONV_PS_SINT16:
+ n := j*16
+ dst[i+31:i] := Convert_Int16_To_FP32(addr[n+15:n])
+ ESAC
+ _MM_BROADCAST_1X16:
+ CASE conv OF
+ _MM_UPCONV_PS_NONE:
+ n := j*32
+ dst[i+31:i] := addr[31:0]
+ _MM_UPCONV_PS_FLOAT16:
+ n := j*16
+ dst[i+31:i] := Convert_FP16_To_FP32(addr[15:0])
+ _MM_UPCONV_PS_UINT8:
+ n := j*8
+ dst[i+31:i] := Convert_UInt8_To_FP32(addr[7:0])
+ _MM_UPCONV_PS_SINT8:
+ n := j*8
+ dst[i+31:i] := Convert_Int8_To_FP32(addr[7:0])
+ _MM_UPCONV_PS_UINT16:
+ n := j*16
+ dst[i+31:i] := Convert_UInt16_To_FP32(addr[15:0])
+ _MM_UPCONV_PS_SINT16:
+ n := j*16
+ dst[i+31:i] := Convert_Int16_To_FP32(addr[15:0])
+ ESAC
+ _MM_BROADCAST_4X16:
+ mod := j%4
+ CASE conv OF
+ _MM_UPCONV_PS_NONE:
+ n := mod*32
+ dst[i+31:i] := addr[n+31:n]
+ _MM_UPCONV_PS_FLOAT16:
+ n := mod*16
+ dst[i+31:i] := Convert_FP16_To_FP32(addr[n+15:n])
+ _MM_UPCONV_PS_UINT8:
+ n := mod*8
+ dst[i+31:i] := Convert_UInt8_To_FP32(addr[n+7:n])
+ _MM_UPCONV_PS_SINT8:
+ n := mod*8
+ dst[i+31:i] := Convert_Int8_To_FP32(addr[n+7:n])
+ _MM_UPCONV_PS_UINT16:
+ n := mod*16
+ dst[i+31:i] := Convert_UInt16_To_FP32(addr[n+15:n])
+ _MM_UPCONV_PS_SINT16:
+ n := mod*16
+ dst[i+31:i] := Convert_Int16_To_FP32(addr[n+15:n])
+ ESAC
+ ESAC
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VMOVAPS" form="zmm {k}, m512" xed="VMOVAPS_ZMMf32_MASKmskw_MEMf32_AVX512"/>
+ <instruction name="VBROADCASTF32X4" form="zmm {k}, m512" xed="VBROADCASTF32X4_ZMMf32_MASKmskw_MEMf32_AVX512"/>
+ <instruction name="VBROADCASTSS" form="zmm {k}, m512" xed="VBROADCASTSS_ZMMf32_MASKmskw_MEMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_extload_epi32">
+ <type>Integer</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Load</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="void const *" varname="mt" etype="UI32"/>
+ <parameter type="_MM_UPCONV_EPI32_ENUM" varname="conv" etype="IMM" immtype="_MM_UPCONV_EPI32"/>
+ <parameter type="_MM_BROADCAST32_ENUM" varname="bc" etype="UI32"/>
+ <parameter type="int" varname="hint" etype="UI32" hint="TRUE"/>
+ <description>Depending on "bc", loads 1, 4, or 16 elements of type and size determined by "conv" from memory address "mt" and converts all elements to 32-bit integer elements, storing the results in "dst". "hint" indicates to the processor whether the data is non-temporal.</description>
+ <operation>addr := MEM[mt]
+FOR j := 0 to 15
+ i := j*32
+ CASE bc OF
+ _MM_BROADCAST32_NONE:
+ CASE conv OF
+ _MM_UPCONV_EPI32_NONE:
+ n := j*32
+ dst[i+31:i] := addr[n+31:n]
+ _MM_UPCONV_EPI32_UINT8:
+ n := j*8
+ dst[i+31:i] := ZeroExtend32(addr[n+7:n])
+ _MM_UPCONV_EPI32_SINT8:
+ n := j*8
+ dst[i+31:i] := SignExtend32(addr[n+7:n])
+ _MM_UPCONV_EPI32_UINT16:
+ n := j*16
+ dst[i+31:i] := ZeroExtend32(addr[n+15:n])
+ _MM_UPCONV_EPI32_SINT16:
+ n := j*16
+ dst[i+31:i] := SignExtend32(addr[n+15:n])
+ ESAC
+ _MM_BROADCAST_1X16:
+ CASE conv OF
+ _MM_UPCONV_EPI32_NONE:
+ n := j*32
+ dst[i+31:i] := addr[31:0]
+ _MM_UPCONV_EPI32_UINT8:
+ n := j*8
+ dst[i+31:i] := ZeroExtend32(addr[7:0])
+ _MM_UPCONV_EPI32_SINT8:
+ n := j*8
+ dst[i+31:i] := SignExtend32(addr[7:0])
+ _MM_UPCONV_EPI32_UINT16:
+ n := j*16
+ dst[i+31:i] := ZeroExtend32(addr[15:0])
+ _MM_UPCONV_EPI32_SINT16:
+ n := j*16
+ dst[i+31:i] := SignExtend32(addr[15:0])
+ ESAC
+ _MM_BROADCAST_4X16:
+ mod := j%4
+ CASE conv OF
+ _MM_UPCONV_EPI32_NONE:
+ n := mod*32
+ dst[i+31:i] := addr[n+31:n]
+ _MM_UPCONV_EPI32_UINT8:
+ n := mod*8
+ dst[i+31:i] := ZeroExtend32(addr[n+7:n])
+ _MM_UPCONV_EPI32_SINT8:
+ n := mod*8
+ dst[i+31:i] := SignExtend32(addr[n+7:n])
+ _MM_UPCONV_EPI32_UINT16:
+ n := mod*16
+ dst[i+31:i] := ZeroExtend32(addr[n+15:n])
+ _MM_UPCONV_EPI32_SINT16:
+ n := mod*16
+ dst[i+31:i] := SignExtend32(addr[n+15:n])
+ ESAC
+ ESAC
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VMOVDQA32" form="zmm, m512" xed="VMOVDQA32_ZMMu32_MASKmskw_MEMu32_AVX512"/>
+ <instruction name="VBROADCASTI32X4" form="zmm, m512" xed="VBROADCASTI32X4_ZMMu32_MASKmskw_MEMu32_AVX512"/>
+ <instruction name="VPBROADCASTD" form="zmm, m512" xed="VPBROADCASTD_ZMMu32_MASKmskw_MEMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_mask_extload_epi32">
+ <type>Integer</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Load</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="src" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="void const *" varname="mt" etype="UI32"/>
+ <parameter type="_MM_UPCONV_EPI32_ENUM" varname="conv" etype="IMM" immtype="_MM_UPCONV_EPI32"/>
+ <parameter type="_MM_BROADCAST32_ENUM" varname="bc" etype="UI32"/>
+ <parameter type="int" varname="hint" etype="UI32" hint="TRUE"/>
+ <description>Depending on "bc", loads 1, 4, or 16 elements of type and size determined by "conv" from memory address "mt" and converts all elements to 32-bit integer elements, storing the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). "hint" indicates to the processor whether the data is non-temporal.</description>
+ <operation>addr := MEM[mt]
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ CASE bc OF
+ _MM_BROADCAST32_NONE:
+ CASE conv OF
+ _MM_UPCONV_EPI32_NONE:
+ n := j*32
+ dst[i+31:i] := addr[n+31:n]
+ _MM_UPCONV_EPI32_UINT8:
+ n := j*8
+ dst[i+31:i] := ZeroExtend32(addr[n+7:n])
+ _MM_UPCONV_EPI32_SINT8:
+ n := j*8
+ dst[i+31:i] := SignExtend32(addr[n+7:n])
+ _MM_UPCONV_EPI32_UINT16:
+ n := j*16
+ dst[i+31:i] := ZeroExtend32(addr[n+15:n])
+ _MM_UPCONV_EPI32_SINT16:
+ n := j*16
+ dst[i+31:i] := SignExtend32(addr[n+15:n])
+ ESAC
+ _MM_BROADCAST_1X16:
+ CASE conv OF
+ _MM_UPCONV_EPI32_NONE:
+ n := j*32
+ dst[i+31:i] := addr[31:0]
+ _MM_UPCONV_EPI32_UINT8:
+ n := j*8
+ dst[i+31:i] := ZeroExtend32(addr[7:0])
+ _MM_UPCONV_EPI32_SINT8:
+ n := j*8
+ dst[i+31:i] := SignExtend32(addr[7:0])
+ _MM_UPCONV_EPI32_UINT16:
+ n := j*16
+ dst[i+31:i] := ZeroExtend32(addr[15:0])
+ _MM_UPCONV_EPI32_SINT16:
+ n := j*16
+ dst[i+31:i] := SignExtend32(addr[15:0])
+ ESAC
+ _MM_BROADCAST_4X16:
+ mod := j%4
+ CASE conv OF
+ _MM_UPCONV_EPI32_NONE:
+ n := mod*32
+ dst[i+31:i] := addr[n+31:n]
+ _MM_UPCONV_EPI32_UINT8:
+ n := mod*8
+ dst[i+31:i] := ZeroExtend32(addr[n+7:n])
+ _MM_UPCONV_EPI32_SINT8:
+ n := mod*8
+ dst[i+31:i] := SignExtend32(addr[n+7:n])
+ _MM_UPCONV_EPI32_UINT16:
+ n := mod*16
+ dst[i+31:i] := ZeroExtend32(addr[n+15:n])
+ _MM_UPCONV_EPI32_SINT16:
+ n := mod*16
+ dst[i+31:i] := SignExtend32(addr[n+15:n])
+ ESAC
+ ESAC
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VMOVDQA32" form="zmm {k}, m512" xed="VMOVDQA32_ZMMu32_MASKmskw_MEMu32_AVX512"/>
+ <instruction name="VBROADCASTI32X4" form="zmm {k}, m512" xed="VBROADCASTI32X4_ZMMu32_MASKmskw_MEMu32_AVX512"/>
+ <instruction name="VPBROADCASTD" form="zmm {k}, m512" xed="VPBROADCASTD_ZMMu32_MASKmskw_MEMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_extload_pd">
+ <type>Floating Point</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Load</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="void const *" varname="mt" etype="FP64"/>
+ <parameter type="_MM_UPCONV_PD_ENUM" varname="conv" etype="IMM" immtype="_MM_UPCONV_PD"/>
+ <parameter type="_MM_BROADCAST64_ENUM" varname="bc" etype="UI64"/>
+ <parameter type="int" varname="hint" etype="UI32" hint="TRUE"/>
+ <description>Depending on "bc", loads 1, 4, or 8 elements of type and size determined by "conv" from memory address "mt" and converts all elements to double-precision (64-bit) floating-point elements, storing the results in "dst". "hint" indicates to the processor whether the data is non-temporal.</description>
+ <operation>addr := MEM[mt]
+FOR j := 0 to 7
+ i := j*64
+ CASE bc OF
+ _MM_BROADCAST64_NONE:
+ CASE conv OF
+ _MM_UPCONV_PD_NONE:
+ n := j*64
+ dst[i+63:i] := addr[n+63:n]
+ ESAC
+ _MM_BROADCAST_1X8:
+ CASE conv OF
+ _MM_UPCONV_PD_NONE:
+ n := j*64
+ dst[i+63:i] := addr[63:0]
+ ESAC
+ _MM_BROADCAST_4X8:
+ mod := j%4
+ CASE conv OF
+ _MM_UPCONV_PD_NONE:
+ n := mod*64
+ dst[i+63:i] := addr[n+63:n]
+ ESAC
+ ESAC
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VMOVAPD" form="zmm, m512" xed="VMOVAPD_ZMMf64_MASKmskw_MEMf64_AVX512"/>
+ <instruction name="VBROADCASTF64X4" form="zmm, m512" xed="VBROADCASTF64X4_ZMMf64_MASKmskw_MEMf64_AVX512"/>
+ <instruction name="VBROADCASTSD" form="zmm, m512" xed="VBROADCASTSD_ZMMf64_MASKmskw_MEMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_mask_extload_pd">
+ <type>Floating Point</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Load</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="void const *" varname="mt" etype="FP64"/>
+ <parameter type="_MM_UPCONV_PD_ENUM" varname="conv" etype="IMM" immtype="_MM_UPCONV_PD"/>
+ <parameter type="_MM_BROADCAST64_ENUM" varname="bc" etype="UI64"/>
+ <parameter type="int" varname="hint" etype="UI32" hint="TRUE"/>
+ <description>Depending on "bc", loads 1, 4, or 8 elements of type and size determined by "conv" from memory address "mt" and converts all elements to double-precision (64-bit) floating-point elements, storing the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). "hint" indicates to the processor whether the data is non-temporal.</description>
+ <operation>addr := MEM[mt]
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ CASE bc OF
+ _MM_BROADCAST64_NONE:
+ CASE conv OF
+ _MM_UPCONV_PD_NONE:
+ n := j*64
+ dst[i+63:i] := addr[n+63:n]
+ ESAC
+ _MM_BROADCAST_1X8:
+ CASE conv OF
+ _MM_UPCONV_PD_NONE:
+ n := j*64
+ dst[i+63:i] := addr[63:0]
+ ESAC
+ _MM_BROADCAST_4X8:
+ mod := j%4
+ CASE conv OF
+ _MM_UPCONV_PD_NONE:
+ n := mod*64
+ dst[i+63:i] := addr[n+63:n]
+ ESAC
+ ESAC
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VMOVAPD" form="zmm {k}, m512" xed="VMOVAPD_ZMMf64_MASKmskw_MEMf64_AVX512"/>
+ <instruction name="VBROADCASTF64X4" form="zmm {k}, m512" xed="VBROADCASTF64X4_ZMMf64_MASKmskw_MEMf64_AVX512"/>
+ <instruction name="VBROADCASTSD" form="zmm {k}, m512" xed="VBROADCASTSD_ZMMf64_MASKmskw_MEMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_extload_epi64">
+ <type>Integer</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Load</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="void const *" varname="mt" etype="UI64"/>
+ <parameter type="_MM_UPCONV_EPI64_ENUM" varname="conv" etype="IMM" immtype="_MM_UPCONV_EPI64"/>
+ <parameter type="_MM_BROADCAST64_ENUM" varname="bc" etype="UI64"/>
+ <parameter type="int" varname="hint" etype="UI32" hint="TRUE"/>
+ <description>Depending on "bc", loads 1, 4, or 8 elements of type and size determined by "conv" from memory address "mt" and converts all elements to 64-bit integer elements, storing the results in "dst". "hint" indicates to the processor whether the data is non-temporal.</description>
+ <operation>addr := MEM[mt]
+FOR j := 0 to 7
+ i := j*64
+ CASE bc OF
+ _MM_BROADCAST64_NONE:
+ CASE conv OF
+ _MM_UPCONV_EPI64_NONE:
+ n := j*64
+ dst[i+63:i] := addr[n+63:n]
+ ESAC
+ _MM_BROADCAST_1X8:
+ CASE conv OF
+ _MM_UPCONV_EPI64_NONE:
+ n := j*64
+ dst[i+63:i] := addr[63:0]
+ ESAC
+ _MM_BROADCAST_4X8:
+ mod := j%4
+ CASE conv OF
+ _MM_UPCONV_EPI64_NONE:
+ n := mod*64
+ dst[i+63:i] := addr[n+63:n]
+ ESAC
+ ESAC
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VMOVDQA64" form="zmm, m512" xed="VMOVDQA64_ZMMu64_MASKmskw_MEMu64_AVX512"/>
+ <instruction name="VBROADCASTI64X4" form="zmm, m512" xed="VBROADCASTI64X4_ZMMu64_MASKmskw_MEMu64_AVX512"/>
+ <instruction name="VPBROADCASTQ" form="zmm, m512" xed="VPBROADCASTQ_ZMMu64_MASKmskw_MEMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_mask_extload_epi64">
+ <type>Integer</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Load</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="void const *" varname="mt" etype="UI64"/>
+ <parameter type="_MM_UPCONV_EPI64_ENUM" varname="conv" etype="IMM" immtype="_MM_UPCONV_EPI64"/>
+ <parameter type="_MM_BROADCAST64_ENUM" varname="bc" etype="UI64"/>
+ <parameter type="int" varname="hint" etype="UI32" hint="TRUE"/>
+ <description>Depending on "bc", loads 1, 4, or 8 elements of type and size determined by "conv" from memory address "mt" and converts all elements to 64-bit integer elements, storing the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). "hint" indicates to the processor whether the data is non-temporal.</description>
+ <operation>addr := MEM[mt]
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ CASE bc OF
+ _MM_BROADCAST64_NONE:
+ CASE conv OF
+ _MM_UPCONV_EPI64_NONE:
+ n := j*64
+ dst[i+63:i] := addr[n+63:n]
+ ESAC
+ _MM_BROADCAST_1X8:
+ CASE conv OF
+ _MM_UPCONV_EPI64_NONE:
+ n := j*64
+ dst[i+63:i] := addr[63:0]
+ ESAC
+ _MM_BROADCAST_4X8:
+ mod := j%4
+ CASE conv OF
+ _MM_UPCONV_EPI64_NONE:
+ n := mod*64
+ dst[i+63:i] := addr[n+63:n]
+ ESAC
+ ESAC
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VMOVDQA64" form="zmm {k}, m512" xed="VMOVDQA64_ZMMu64_MASKmskw_MEMu64_AVX512"/>
+ <instruction name="VBROADCASTI64X4" form="zmm {k}, m512" xed="VBROADCASTI64X4_ZMMu64_MASKmskw_MEMu64_AVX512"/>
+ <instruction name="VPBROADCASTQ" form="zmm {k}, m512" xed="VPBROADCASTQ_ZMMu64_MASKmskw_MEMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" sequence="TRUE" name="_mm512_swizzle_ps">
+ <type>Floating Point</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="v" etype="FP32"/>
+ <parameter type="_MM_SWIZZLE_ENUM" varname="s" etype="UI32"/>
+ <description>Performs a swizzle transformation of each of the four groups of packed 4x single-precision (32-bit) floating-point elements in "v" using swizzle parameter "s", storing the results in "dst".</description>
+ <operation>CASE s OF
+_MM_SWIZ_REG_NONE:
+ dst[511:0] := v[511:0]
+_MM_SWIZ_REG_DCBA:
+ dst[511:0] := v[511:0]
+_MM_SWIZ_REG_CDAB:
+ FOR j := 0 to 7
+ i := j*64
+ dst[i+31:i] := v[i+63:i+32]
+ dst[i+63:i+32] := v[i+31:i]
+ ENDFOR
+_MM_SWIZ_REG_BADC:
+ FOR j := 0 to 3
+ i := j*128
+ dst[i+31:i] := v[i+95:i+64]
+ dst[i+63:i+32] := v[i+127:i+96]
+ dst[i+95:i+64] := v[i+31:i]
+ dst[i+127:i+96] := v[i+63:i+32]
+ ENDFOR
+_MM_SWIZ_REG_AAAA:
+ FOR j := 0 to 3
+ i := j*128
+ dst[i+31:i] := v[i+31:i]
+ dst[i+63:i+32] := v[i+31:i]
+ dst[i+95:i+64] := v[i+31:i]
+ dst[i+127:i+96] := v[i+31:i]
+ ENDFOR
+_MM_SWIZ_REG_BBBB:
+ FOR j := 0 to 3
+ i := j*128
+ dst[i+31:i] := v[i+63:i+32]
+ dst[i+63:i+32] := v[i+63:i+32]
+ dst[i+95:i+64] := v[i+63:i+32]
+ dst[i+127:i+96] := v[i+63:i+32]
+ ENDFOR
+_MM_SWIZ_REG_CCCC:
+ FOR j := 0 to 3
+ i := j*128
+ dst[i+31:i] := v[i+95:i+64]
+ dst[i+63:i+32] := v[i+95:i+64]
+ dst[i+95:i+64] := v[i+95:i+64]
+ dst[i+127:i+96] := v[i+95:i+64]
+ ENDFOR
+_MM_SWIZ_REG_DDDD:
+ FOR j := 0 to 3
+ i := j*128
+ dst[i+31:i] := v[i+127:i+96]
+ dst[i+63:i+32] := v[i+127:i+96]
+ dst[i+95:i+64] := v[i+127:i+96]
+ dst[i+127:i+96] := v[i+127:i+96]
+ ENDFOR
+_MM_SWIZ_REG_DACB:
+ FOR j := 0 to 3
+ i := j*128
+ dst[i+31:i] := v[i+63:i+32]
+ dst[i+63:i+32] := v[i+95:i+64]
+ dst[i+95:i+64] := v[i+31:i]
+ dst[i+127:i+96] := v[i+127:i+96]
+ ENDFOR
+ESAC
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" sequence="TRUE" name="_mm512_swizzle_pd">
+ <type>Floating Point</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="v" etype="FP64"/>
+ <parameter type="_MM_SWIZZLE_ENUM" varname="s" etype="UI64"/>
+ <description>Performs a swizzle transformation of each of the two groups of packed 4x double-precision (64-bit) floating-point elements in "v" using swizzle parameter "s", storing the results in "dst".</description>
+ <operation>CASE s OF
+_MM_SWIZ_REG_NONE:
+ dst[511:0] := v[511:0]
+_MM_SWIZ_REG_DCBA:
+ dst[511:0] := v[511:0]
+_MM_SWIZ_REG_CDAB:
+ FOR j := 0 to 3
+ i := j*64
+ dst[i+63:i] := v[i+127:i+64]
+ dst[i+127:i+64] := v[i+63:i]
+ ENDFOR
+_MM_SWIZ_REG_BADC:
+ FOR j := 0 to 1
+ i := j*256
+ dst[i+63:i] := v[i+191:i+128]
+ dst[i+127:i+64] := v[i+255:i+192]
+ dst[i+191:i+128] := v[i+63:i]
+ dst[i+255:i+192] := v[i+127:i+64]
+ ENDFOR
+_MM_SWIZ_REG_AAAA:
+ FOR j := 0 to 1
+ i := j*256
+ dst[i+63:i] := v[i+63:i]
+ dst[i+127:i+64] := v[i+63:i]
+ dst[i+191:i+128] := v[i+63:i]
+ dst[i+255:i+192] := v[i+63:i]
+ ENDFOR
+_MM_SWIZ_REG_BBBB:
+ FOR j := 0 to 1
+ i := j*256
+  dst[i+63:i] := v[i+127:i+64]
+  dst[i+127:i+64] := v[i+127:i+64]
+  dst[i+191:i+128] := v[i+127:i+64]
+  dst[i+255:i+192] := v[i+127:i+64]
+ ENDFOR
+_MM_SWIZ_REG_CCCC:
+ FOR j := 0 to 1
+ i := j*256
+ dst[i+63:i] := v[i+191:i+128]
+ dst[i+127:i+64] := v[i+191:i+128]
+ dst[i+191:i+128] := v[i+191:i+128]
+ dst[i+255:i+192] := v[i+191:i+128]
+ ENDFOR
+_MM_SWIZ_REG_DDDD:
+ FOR j := 0 to 1
+ i := j*256
+ dst[i+63:i] := v[i+255:i+192]
+ dst[i+127:i+64] := v[i+255:i+192]
+ dst[i+191:i+128] := v[i+255:i+192]
+ dst[i+255:i+192] := v[i+255:i+192]
+ ENDFOR
+_MM_SWIZ_REG_DACB:
+ FOR j := 0 to 1
+ i := j*256
+ dst[i+63:i] := v[i+127:i+64]
+ dst[i+127:i+64] := v[i+191:i+128]
+ dst[i+191:i+128] := v[i+63:i]
+ dst[i+255:i+192] := v[i+255:i+192]
+ ENDFOR
+ESAC
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" sequence="TRUE" name="_mm512_swizzle_epi32">
+ <type>Integer</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="v" etype="UI32"/>
+ <parameter type="_MM_SWIZZLE_ENUM" varname="s" etype="UI32"/>
+ <description>Performs a swizzle transformation of each of the four groups of packed 4x 32-bit integer elements in "v" using swizzle parameter "s", storing the results in "dst".</description>
+ <operation>CASE s OF
+_MM_SWIZ_REG_NONE:
+ dst[511:0] := v[511:0]
+_MM_SWIZ_REG_DCBA:
+ dst[511:0] := v[511:0]
+_MM_SWIZ_REG_CDAB:
+ FOR j := 0 to 7
+ i := j*64
+ dst[i+31:i] := v[i+63:i+32]
+ dst[i+63:i+32] := v[i+31:i]
+ ENDFOR
+_MM_SWIZ_REG_BADC:
+ FOR j := 0 to 3
+ i := j*128
+ dst[i+31:i] := v[i+95:i+64]
+ dst[i+63:i+32] := v[i+127:i+96]
+ dst[i+95:i+64] := v[i+31:i]
+ dst[i+127:i+96] := v[i+63:i+32]
+ ENDFOR
+_MM_SWIZ_REG_AAAA:
+ FOR j := 0 to 3
+ i := j*128
+ dst[i+31:i] := v[i+31:i]
+ dst[i+63:i+32] := v[i+31:i]
+ dst[i+95:i+64] := v[i+31:i]
+ dst[i+127:i+96] := v[i+31:i]
+ ENDFOR
+_MM_SWIZ_REG_BBBB:
+ FOR j := 0 to 3
+ i := j*128
+ dst[i+31:i] := v[i+63:i+32]
+ dst[i+63:i+32] := v[i+63:i+32]
+ dst[i+95:i+64] := v[i+63:i+32]
+ dst[i+127:i+96] := v[i+63:i+32]
+ ENDFOR
+_MM_SWIZ_REG_CCCC:
+ FOR j := 0 to 3
+ i := j*128
+ dst[i+31:i] := v[i+95:i+64]
+ dst[i+63:i+32] := v[i+95:i+64]
+ dst[i+95:i+64] := v[i+95:i+64]
+ dst[i+127:i+96] := v[i+95:i+64]
+ ENDFOR
+_MM_SWIZ_REG_DDDD:
+ FOR j := 0 to 3
+ i := j*128
+ dst[i+31:i] := v[i+127:i+96]
+ dst[i+63:i+32] := v[i+127:i+96]
+ dst[i+95:i+64] := v[i+127:i+96]
+ dst[i+127:i+96] := v[i+127:i+96]
+ ENDFOR
+_MM_SWIZ_REG_DACB:
+ FOR j := 0 to 3
+ i := j*128
+ dst[i+31:i] := v[i+63:i+32]
+ dst[i+63:i+32] := v[i+95:i+64]
+ dst[i+95:i+64] := v[i+31:i]
+ dst[i+127:i+96] := v[i+127:i+96]
+ ENDFOR
+ESAC
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" sequence="TRUE" name="_mm512_swizzle_epi64">
+ <type>Integer</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="v" etype="UI64"/>
+ <parameter type="_MM_SWIZZLE_ENUM" varname="s" etype="UI64"/>
+ <description>Performs a swizzle transformation of each of the two groups of packed 4x 64-bit integer elements in "v" using swizzle parameter "s", storing the results in "dst".</description>
+ <operation>CASE s OF
+_MM_SWIZ_REG_NONE:
+ dst[511:0] := v[511:0]
+_MM_SWIZ_REG_DCBA:
+ dst[511:0] := v[511:0]
+_MM_SWIZ_REG_CDAB:
+ FOR j := 0 to 3
+ i := j*64
+ dst[i+63:i] := v[i+127:i+64]
+ dst[i+127:i+64] := v[i+63:i]
+ ENDFOR
+_MM_SWIZ_REG_BADC:
+ FOR j := 0 to 1
+ i := j*256
+ dst[i+63:i] := v[i+191:i+128]
+ dst[i+127:i+64] := v[i+255:i+192]
+ dst[i+191:i+128] := v[i+63:i]
+ dst[i+255:i+192] := v[i+127:i+64]
+ ENDFOR
+_MM_SWIZ_REG_AAAA:
+ FOR j := 0 to 1
+ i := j*256
+ dst[i+63:i] := v[i+63:i]
+ dst[i+127:i+64] := v[i+63:i]
+ dst[i+191:i+128] := v[i+63:i]
+ dst[i+255:i+192] := v[i+63:i]
+ ENDFOR
+_MM_SWIZ_REG_BBBB:
+ FOR j := 0 to 1
+ i := j*256
+  dst[i+63:i] := v[i+127:i+64]
+  dst[i+127:i+64] := v[i+127:i+64]
+  dst[i+191:i+128] := v[i+127:i+64]
+  dst[i+255:i+192] := v[i+127:i+64]
+ ENDFOR
+_MM_SWIZ_REG_CCCC:
+ FOR j := 0 to 1
+ i := j*256
+ dst[i+63:i] := v[i+191:i+128]
+ dst[i+127:i+64] := v[i+191:i+128]
+ dst[i+191:i+128] := v[i+191:i+128]
+ dst[i+255:i+192] := v[i+191:i+128]
+ ENDFOR
+_MM_SWIZ_REG_DDDD:
+ FOR j := 0 to 1
+ i := j*256
+ dst[i+63:i] := v[i+255:i+192]
+ dst[i+127:i+64] := v[i+255:i+192]
+ dst[i+191:i+128] := v[i+255:i+192]
+ dst[i+255:i+192] := v[i+255:i+192]
+ ENDFOR
+_MM_SWIZ_REG_DACB:
+ FOR j := 0 to 1
+ i := j*256
+ dst[i+63:i] := v[i+127:i+64]
+ dst[i+127:i+64] := v[i+191:i+128]
+ dst[i+191:i+128] := v[i+63:i]
+ dst[i+255:i+192] := v[i+255:i+192]
+ ENDFOR
+ESAC
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" sequence="TRUE" name="_mm512_mask_swizzle_ps">
+ <type>Floating Point</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="v" etype="FP32"/>
+ <parameter type="_MM_SWIZZLE_ENUM" varname="s" etype="UI32"/>
+ <description>Performs a swizzle transformation of each of the four groups of packed 4x single-precision (32-bit) floating-point elements in "v" using swizzle parameter "s", storing the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>CASE s OF
+_MM_SWIZ_REG_NONE:
+ dst[511:0] := v[511:0]
+_MM_SWIZ_REG_DCBA:
+ dst[511:0] := v[511:0]
+_MM_SWIZ_REG_CDAB:
+ FOR j := 0 to 7
+ i := j*64
+ IF k[j*2]
+ dst[i+31:i] := v[i+63:i+32]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ IF k[j*2+1]
+ dst[i+63:i+32] := v[i+31:i]
+ ELSE
+ dst[i+63:i+32] := src[i+63:i+32]
+ FI
+ ENDFOR
+_MM_SWIZ_REG_BADC:
+ FOR j := 0 to 3
+ i := j*128
+ IF k[j*4]
+ dst[i+31:i] := v[i+95:i+64]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ IF k[j*4+1]
+ dst[i+63:i+32] := v[i+127:i+96]
+ ELSE
+ dst[i+63:i+32] := src[i+63:i+32]
+ FI
+ IF k[j*4+2]
+ dst[i+95:i+64] := v[i+31:i]
+ ELSE
+ dst[i+95:i+64] := src[i+95:i+64]
+ FI
+ IF k[j*4+3]
+ dst[i+127:i+96] := v[i+63:i+32]
+ ELSE
+ dst[i+127:i+96] := src[i+127:i+96]
+ FI
+ ENDFOR
+_MM_SWIZ_REG_AAAA:
+ FOR j := 0 to 3
+ i := j*128
+ IF k[j*4]
+ dst[i+31:i] := v[i+31:i]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ IF k[j*4+1]
+ dst[i+63:i+32] := v[i+31:i]
+ ELSE
+ dst[i+63:i+32] := src[i+63:i+32]
+ FI
+ IF k[j*4+2]
+ dst[i+95:i+64] := v[i+31:i]
+ ELSE
+ dst[i+95:i+64] := src[i+95:i+64]
+ FI
+ IF k[j*4+3]
+ dst[i+127:i+96] := v[i+31:i]
+ ELSE
+ dst[i+127:i+96] := src[i+127:i+96]
+ FI
+ ENDFOR
+_MM_SWIZ_REG_BBBB:
+ FOR j := 0 to 3
+ i := j*128
+ IF k[j*4]
+ dst[i+31:i] := v[i+63:i+32]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ IF k[j*4+1]
+ dst[i+63:i+32] := v[i+63:i+32]
+ ELSE
+ dst[i+63:i+32] := src[i+63:i+32]
+ FI
+ IF k[j*4+2]
+ dst[i+95:i+64] := v[i+63:i+32]
+ ELSE
+ dst[i+95:i+64] := src[i+95:i+64]
+ FI
+ IF k[j*4+3]
+ dst[i+127:i+96] := v[i+63:i+32]
+ ELSE
+ dst[i+127:i+96] := src[i+127:i+96]
+ FI
+ ENDFOR
+_MM_SWIZ_REG_CCCC:
+ FOR j := 0 to 3
+ i := j*128
+ IF k[j*4]
+ dst[i+31:i] := v[i+95:i+64]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ IF k[j*4+1]
+ dst[i+63:i+32] := v[i+95:i+64]
+ ELSE
+ dst[i+63:i+32] := src[i+63:i+32]
+ FI
+ IF k[j*4+2]
+ dst[i+95:i+64] := v[i+95:i+64]
+ ELSE
+ dst[i+95:i+64] := src[i+95:i+64]
+ FI
+ IF k[j*4+3]
+ dst[i+127:i+96] := v[i+95:i+64]
+ ELSE
+ dst[i+127:i+96] := src[i+127:i+96]
+ FI
+ ENDFOR
+_MM_SWIZ_REG_DDDD:
+ FOR j := 0 to 3
+ i := j*128
+ IF k[j*4]
+ dst[i+31:i] := v[i+127:i+96]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ IF k[j*4+1]
+ dst[i+63:i+32] := v[i+127:i+96]
+ ELSE
+ dst[i+63:i+32] := src[i+63:i+32]
+ FI
+ IF k[j*4+2]
+ dst[i+95:i+64] := v[i+127:i+96]
+ ELSE
+ dst[i+95:i+64] := src[i+95:i+64]
+ FI
+ IF k[j*4+3]
+ dst[i+127:i+96] := v[i+127:i+96]
+ ELSE
+ dst[i+127:i+96] := src[i+127:i+96]
+ FI
+ ENDFOR
+_MM_SWIZ_REG_DACB:
+ FOR j := 0 to 3
+ i := j*128
+ IF k[j*4]
+ dst[i+31:i] := v[i+63:i+32]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ IF k[j*4+1]
+ dst[i+63:i+32] := v[i+95:i+64]
+ ELSE
+ dst[i+63:i+32] := src[i+63:i+32]
+ FI
+ IF k[j*4+2]
+ dst[i+95:i+64] := v[i+31:i]
+ ELSE
+ dst[i+95:i+64] := src[i+95:i+64]
+ FI
+ IF k[j*4+3]
+ dst[i+127:i+96] := v[i+127:i+96]
+ ELSE
+ dst[i+127:i+96] := src[i+127:i+96]
+ FI
+ ENDFOR
+ESAC
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" sequence="TRUE" name="_mm512_mask_swizzle_pd">
+ <type>Floating Point</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="v" etype="FP64"/>
+ <parameter type="_MM_SWIZZLE_ENUM" varname="s" etype="UI64"/>
+ <description>Performs a swizzle transformation of each of the two groups of packed 4x double-precision (64-bit) floating-point elements in "v" using swizzle parameter "s", storing the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>CASE s OF
+_MM_SWIZ_REG_NONE:
+ dst[511:0] := v[511:0]
+_MM_SWIZ_REG_DCBA:
+ dst[511:0] := v[511:0]
+_MM_SWIZ_REG_CDAB:
+ FOR j := 0 to 3
+ i := j*64
+ IF k[j*2]
+ dst[i+63:i] := v[i+127:i+64]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ IF k[j*2+1]
+ dst[i+127:i+64] := v[i+63:i]
+ ELSE
+ dst[i+127:i+64] := src[i+127:i+64]
+ FI
+ ENDFOR
+_MM_SWIZ_REG_BADC:
+ FOR j := 0 to 1
+ i := j*256
+ IF k[j*4]
+ dst[i+63:i] := v[i+191:i+128]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ IF k[j*4+1]
+ dst[i+127:i+64] := v[i+255:i+192]
+ ELSE
+ dst[i+127:i+64] := src[i+127:i+64]
+ FI
+ IF k[j*4+2]
+ dst[i+191:i+128] := v[i+63:i]
+ ELSE
+ dst[i+191:i+128] := src[i+191:i+128]
+ FI
+ IF k[j*4+3]
+ dst[i+255:i+192] := v[i+127:i+64]
+ ELSE
+ dst[i+255:i+192] := src[i+255:i+192]
+ FI
+ ENDFOR
+_MM_SWIZ_REG_AAAA:
+ FOR j := 0 to 1
+ i := j*256
+ IF k[j*4]
+ dst[i+63:i] := v[i+63:i]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ IF k[j*4+1]
+ dst[i+127:i+64] := v[i+63:i]
+ ELSE
+ dst[i+127:i+64] := src[i+127:i+64]
+ FI
+ IF k[j*4+2]
+ dst[i+191:i+128] := v[i+63:i]
+ ELSE
+ dst[i+191:i+128] := src[i+191:i+128]
+ FI
+ IF k[j*4+3]
+ dst[i+255:i+192] := v[i+63:i]
+ ELSE
+ dst[i+255:i+192] := src[i+255:i+192]
+ FI
+ ENDFOR
+_MM_SWIZ_REG_BBBB:
+ FOR j := 0 to 1
+ i := j*256
+ IF k[j*4]
+    dst[i+63:i] := v[i+127:i+64]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ IF k[j*4+1]
+    dst[i+127:i+64] := v[i+127:i+64]
+ ELSE
+ dst[i+127:i+64] := src[i+127:i+64]
+ FI
+ IF k[j*4+2]
+    dst[i+191:i+128] := v[i+127:i+64]
+ ELSE
+ dst[i+191:i+128] := src[i+191:i+128]
+ FI
+ IF k[j*4+3]
+    dst[i+255:i+192] := v[i+127:i+64]
+ ELSE
+ dst[i+255:i+192] := src[i+255:i+192]
+ FI
+ ENDFOR
+_MM_SWIZ_REG_CCCC:
+ FOR j := 0 to 1
+ i := j*256
+ IF k[j*4]
+ dst[i+63:i] := v[i+191:i+128]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ IF k[j*4+1]
+ dst[i+127:i+64] := v[i+191:i+128]
+ ELSE
+ dst[i+127:i+64] := src[i+127:i+64]
+ FI
+ IF k[j*4+2]
+ dst[i+191:i+128] := v[i+191:i+128]
+ ELSE
+ dst[i+191:i+128] := src[i+191:i+128]
+ FI
+ IF k[j*4+3]
+ dst[i+255:i+192] := v[i+191:i+128]
+ ELSE
+ dst[i+255:i+192] := src[i+255:i+192]
+ FI
+ ENDFOR
+_MM_SWIZ_REG_DDDD:
+ FOR j := 0 to 1
+ i := j*256
+ IF k[j*4]
+ dst[i+63:i] := v[i+255:i+192]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ IF k[j*4+1]
+ dst[i+127:i+64] := v[i+255:i+192]
+ ELSE
+ dst[i+127:i+64] := src[i+127:i+64]
+ FI
+ IF k[j*4+2]
+ dst[i+191:i+128] := v[i+255:i+192]
+ ELSE
+ dst[i+191:i+128] := src[i+191:i+128]
+ FI
+ IF k[j*4+3]
+ dst[i+255:i+192] := v[i+255:i+192]
+ ELSE
+ dst[i+255:i+192] := src[i+255:i+192]
+ FI
+ ENDFOR
+_MM_SWIZ_REG_DACB:
+ FOR j := 0 to 1
+ i := j*256
+ IF k[j*4]
+ dst[i+63:i] := v[i+127:i+64]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ IF k[j*4+1]
+ dst[i+127:i+64] := v[i+191:i+128]
+ ELSE
+ dst[i+127:i+64] := src[i+127:i+64]
+ FI
+ IF k[j*4+2]
+ dst[i+191:i+128] := v[i+63:i]
+ ELSE
+ dst[i+191:i+128] := src[i+191:i+128]
+ FI
+ IF k[j*4+3]
+ dst[i+255:i+192] := v[i+255:i+192]
+ ELSE
+ dst[i+255:i+192] := src[i+255:i+192]
+ FI
+ ENDFOR
+ESAC
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" sequence="TRUE" name="_mm512_mask_swizzle_epi32">
+ <type>Integer</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="src" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="v" etype="UI32"/>
+ <parameter type="_MM_SWIZZLE_ENUM" varname="s" etype="UI32"/>
+ <description>Performs a swizzle transformation of each of the four groups of packed 4x32-bit integer elements in "v" using swizzle parameter "s", storing the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>CASE s OF
+_MM_SWIZ_REG_NONE:
+ dst[511:0] := v[511:0]
+_MM_SWIZ_REG_DCBA:
+ dst[511:0] := v[511:0]
+_MM_SWIZ_REG_CDAB:
+ FOR j := 0 to 7
+ i := j*64
+ IF k[j*2]
+ dst[i+31:i] := v[i+63:i+32]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ IF k[j*2+1]
+ dst[i+63:i+32] := v[i+31:i]
+ ELSE
+ dst[i+63:i+32] := src[i+63:i+32]
+ FI
+ ENDFOR
+_MM_SWIZ_REG_BADC:
+ FOR j := 0 to 3
+ i := j*128
+ IF k[j*4]
+ dst[i+31:i] := v[i+95:i+64]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ IF k[j*4+1]
+ dst[i+63:i+32] := v[i+127:i+96]
+ ELSE
+ dst[i+63:i+32] := src[i+63:i+32]
+ FI
+ IF k[j*4+2]
+ dst[i+95:i+64] := v[i+31:i]
+ ELSE
+ dst[i+95:i+64] := src[i+95:i+64]
+ FI
+ IF k[j*4+3]
+ dst[i+127:i+96] := v[i+63:i+32]
+ ELSE
+ dst[i+127:i+96] := src[i+127:i+96]
+ FI
+ ENDFOR
+_MM_SWIZ_REG_AAAA:
+ FOR j := 0 to 3
+ i := j*128
+ IF k[j*4]
+ dst[i+31:i] := v[i+31:i]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ IF k[j*4+1]
+ dst[i+63:i+32] := v[i+31:i]
+ ELSE
+ dst[i+63:i+32] := src[i+63:i+32]
+ FI
+ IF k[j*4+2]
+ dst[i+95:i+64] := v[i+31:i]
+ ELSE
+ dst[i+95:i+64] := src[i+95:i+64]
+ FI
+ IF k[j*4+3]
+ dst[i+127:i+96] := v[i+31:i]
+ ELSE
+ dst[i+127:i+96] := src[i+127:i+96]
+ FI
+ ENDFOR
+_MM_SWIZ_REG_BBBB:
+ FOR j := 0 to 3
+ i := j*128
+ IF k[j*4]
+ dst[i+31:i] := v[i+63:i+32]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ IF k[j*4+1]
+ dst[i+63:i+32] := v[i+63:i+32]
+ ELSE
+ dst[i+63:i+32] := src[i+63:i+32]
+ FI
+ IF k[j*4+2]
+ dst[i+95:i+64] := v[i+63:i+32]
+ ELSE
+ dst[i+95:i+64] := src[i+95:i+64]
+ FI
+ IF k[j*4+3]
+ dst[i+127:i+96] := v[i+63:i+32]
+ ELSE
+ dst[i+127:i+96] := src[i+127:i+96]
+ FI
+ ENDFOR
+_MM_SWIZ_REG_CCCC:
+ FOR j := 0 to 3
+ i := j*128
+ IF k[j*4]
+ dst[i+31:i] := v[i+95:i+64]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ IF k[j*4+1]
+ dst[i+63:i+32] := v[i+95:i+64]
+ ELSE
+ dst[i+63:i+32] := src[i+63:i+32]
+ FI
+ IF k[j*4+2]
+ dst[i+95:i+64] := v[i+95:i+64]
+ ELSE
+ dst[i+95:i+64] := src[i+95:i+64]
+ FI
+ IF k[j*4+3]
+ dst[i+127:i+96] := v[i+95:i+64]
+ ELSE
+ dst[i+127:i+96] := src[i+127:i+96]
+ FI
+ ENDFOR
+_MM_SWIZ_REG_DDDD:
+ FOR j := 0 to 3
+ i := j*128
+ IF k[j*4]
+ dst[i+31:i] := v[i+127:i+96]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ IF k[j*4+1]
+ dst[i+63:i+32] := v[i+127:i+96]
+ ELSE
+ dst[i+63:i+32] := src[i+63:i+32]
+ FI
+ IF k[j*4+2]
+ dst[i+95:i+64] := v[i+127:i+96]
+ ELSE
+ dst[i+95:i+64] := src[i+95:i+64]
+ FI
+ IF k[j*4+3]
+ dst[i+127:i+96] := v[i+127:i+96]
+ ELSE
+ dst[i+127:i+96] := src[i+127:i+96]
+ FI
+ ENDFOR
+_MM_SWIZ_REG_DACB:
+ FOR j := 0 to 3
+ i := j*128
+ IF k[j*4]
+ dst[i+31:i] := v[i+63:i+32]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ IF k[j*4+1]
+ dst[i+63:i+32] := v[i+95:i+64]
+ ELSE
+ dst[i+63:i+32] := src[i+63:i+32]
+ FI
+ IF k[j*4+2]
+ dst[i+95:i+64] := v[i+31:i]
+ ELSE
+ dst[i+95:i+64] := src[i+95:i+64]
+ FI
+ IF k[j*4+3]
+ dst[i+127:i+96] := v[i+127:i+96]
+ ELSE
+ dst[i+127:i+96] := src[i+127:i+96]
+ FI
+ ENDFOR
+ESAC
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" sequence="TRUE" name="_mm512_mask_swizzle_epi64">
+ <type>Integer</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="v" etype="UI64"/>
+ <parameter type="_MM_SWIZZLE_ENUM" varname="s" etype="UI64"/>
+  <description>Performs a swizzle transformation of each of the two groups of packed 4x64-bit integer elements in "v" using swizzle parameter "s", storing the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>CASE s OF
+_MM_SWIZ_REG_NONE:
+ dst[511:0] := v[511:0]
+_MM_SWIZ_REG_DCBA:
+ dst[511:0] := v[511:0]
+_MM_SWIZ_REG_CDAB:
+ FOR j := 0 to 3
+  i := j*128
+ IF k[j*2]
+ dst[i+63:i] := v[i+127:i+64]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ IF k[j*2+1]
+ dst[i+127:i+64] := v[i+63:i]
+ ELSE
+ dst[i+127:i+64] := src[i+127:i+64]
+ FI
+ ENDFOR
+_MM_SWIZ_REG_BADC:
+ FOR j := 0 to 1
+ i := j*256
+ IF k[j*4]
+ dst[i+63:i] := v[i+191:i+128]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ IF k[j*4+1]
+ dst[i+127:i+64] := v[i+255:i+192]
+ ELSE
+ dst[i+127:i+64] := src[i+127:i+64]
+ FI
+ IF k[j*4+2]
+ dst[i+191:i+128] := v[i+63:i]
+ ELSE
+ dst[i+191:i+128] := src[i+191:i+128]
+ FI
+ IF k[j*4+3]
+ dst[i+255:i+192] := v[i+127:i+64]
+ ELSE
+ dst[i+255:i+192] := src[i+255:i+192]
+ FI
+ ENDFOR
+_MM_SWIZ_REG_AAAA:
+ FOR j := 0 to 1
+ i := j*256
+ IF k[j*4]
+ dst[i+63:i] := v[i+63:i]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ IF k[j*4+1]
+ dst[i+127:i+64] := v[i+63:i]
+ ELSE
+ dst[i+127:i+64] := src[i+127:i+64]
+ FI
+ IF k[j*4+2]
+ dst[i+191:i+128] := v[i+63:i]
+ ELSE
+ dst[i+191:i+128] := src[i+191:i+128]
+ FI
+ IF k[j*4+3]
+ dst[i+255:i+192] := v[i+63:i]
+ ELSE
+ dst[i+255:i+192] := src[i+255:i+192]
+ FI
+ ENDFOR
+_MM_SWIZ_REG_BBBB:
+ FOR j := 0 to 1
+ i := j*256
+ IF k[j*4]
+    dst[i+63:i] := v[i+127:i+64]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ IF k[j*4+1]
+    dst[i+127:i+64] := v[i+127:i+64]
+ ELSE
+ dst[i+127:i+64] := src[i+127:i+64]
+ FI
+ IF k[j*4+2]
+    dst[i+191:i+128] := v[i+127:i+64]
+ ELSE
+ dst[i+191:i+128] := src[i+191:i+128]
+ FI
+ IF k[j*4+3]
+    dst[i+255:i+192] := v[i+127:i+64]
+ ELSE
+ dst[i+255:i+192] := src[i+255:i+192]
+ FI
+ ENDFOR
+_MM_SWIZ_REG_CCCC:
+ FOR j := 0 to 1
+ i := j*256
+ IF k[j*4]
+ dst[i+63:i] := v[i+191:i+128]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ IF k[j*4+1]
+ dst[i+127:i+64] := v[i+191:i+128]
+ ELSE
+ dst[i+127:i+64] := src[i+127:i+64]
+ FI
+ IF k[j*4+2]
+ dst[i+191:i+128] := v[i+191:i+128]
+ ELSE
+ dst[i+191:i+128] := src[i+191:i+128]
+ FI
+ IF k[j*4+3]
+ dst[i+255:i+192] := v[i+191:i+128]
+ ELSE
+ dst[i+255:i+192] := src[i+255:i+192]
+ FI
+ ENDFOR
+_MM_SWIZ_REG_DDDD:
+ FOR j := 0 to 1
+ i := j*256
+ IF k[j*4]
+ dst[i+63:i] := v[i+255:i+192]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ IF k[j*4+1]
+ dst[i+127:i+64] := v[i+255:i+192]
+ ELSE
+ dst[i+127:i+64] := src[i+127:i+64]
+ FI
+ IF k[j*4+2]
+ dst[i+191:i+128] := v[i+255:i+192]
+ ELSE
+ dst[i+191:i+128] := src[i+191:i+128]
+ FI
+ IF k[j*4+3]
+ dst[i+255:i+192] := v[i+255:i+192]
+ ELSE
+ dst[i+255:i+192] := src[i+255:i+192]
+ FI
+ ENDFOR
+_MM_SWIZ_REG_DACB:
+ FOR j := 0 to 1
+ i := j*256
+ IF k[j*4]
+ dst[i+63:i] := v[i+127:i+64]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ IF k[j*4+1]
+ dst[i+127:i+64] := v[i+191:i+128]
+ ELSE
+ dst[i+127:i+64] := src[i+127:i+64]
+ FI
+ IF k[j*4+2]
+ dst[i+191:i+128] := v[i+63:i]
+ ELSE
+ dst[i+191:i+128] := src[i+191:i+128]
+ FI
+ IF k[j*4+3]
+ dst[i+255:i+192] := v[i+255:i+192]
+ ELSE
+ dst[i+255:i+192] := src[i+255:i+192]
+ FI
+ ENDFOR
+ESAC
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_extstore_ps">
+ <type>Floating Point</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void *" varname="mt" etype="FP32"/>
+ <parameter type="__m512" varname="v" etype="FP32"/>
+ <parameter type="_MM_DOWNCONV_PS_ENUM" varname="conv" etype="IMM" immtype="_MM_DOWNCONV_PS"/>
+ <parameter type="int" varname="hint" etype="UI32" hint="TRUE"/>
+ <description>Downconverts packed single-precision (32-bit) floating-point elements stored in "v" to a smaller type depending on "conv" and stores them in memory location "mt". "hint" indicates to the processor whether the data is non-temporal.</description>
+ <operation>
+addr := MEM[mt]
+FOR j := 0 to 15
+ i := j*32
+ CASE conv OF
+ _MM_DOWNCONV_PS_NONE:
+ addr[i+31:i] := v[i+31:i]
+ _MM_DOWNCONV_PS_FLOAT16:
+ n := j*16
+ addr[n+15:n] := Convert_FP32_To_FP16(v[i+31:i])
+ _MM_DOWNCONV_PS_UINT8:
+ n := j*8
+ addr[n+7:n] := Convert_FP32_To_UInt8(v[i+31:i])
+ _MM_DOWNCONV_PS_SINT8:
+ n := j*8
+ addr[n+7:n] := Convert_FP32_To_Int8(v[i+31:i])
+ _MM_DOWNCONV_PS_UINT16:
+ n := j*16
+ addr[n+15:n] := Convert_FP32_To_UInt16(v[i+31:i])
+ _MM_DOWNCONV_PS_SINT16:
+ n := j*16
+ addr[n+15:n] := Convert_FP32_To_Int16(v[i+31:i])
+ ESAC
+ENDFOR
+ </operation>
+ <instruction name="VMOVAPS" form="m512, zmm" xed="VMOVAPS_MEMf32_MASKmskw_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_extstore_epi32">
+ <type>Integer</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void *" varname="mt" etype="UI32"/>
+ <parameter type="__m512i" varname="v" etype="UI32"/>
+ <parameter type="_MM_DOWNCONV_EPI32_ENUM" varname="conv" etype="IMM" immtype="_MM_DOWNCONV_EPI32"/>
+ <parameter type="int" varname="hint" etype="UI32" hint="TRUE"/>
+ <description>Downconverts packed 32-bit integer elements stored in "v" to a smaller type depending on "conv" and stores them in memory location "mt". "hint" indicates to the processor whether the data is non-temporal.</description>
+ <operation>addr := MEM[mt]
+FOR j := 0 to 15
+ i := j*32
+ CASE conv OF
+ _MM_DOWNCONV_EPI32_NONE:
+ addr[i+31:i] := v[i+31:i]
+ _MM_DOWNCONV_EPI32_UINT8:
+ n := j*8
+ addr[n+7:n] := Int32ToUInt8(v[i+31:i])
+ _MM_DOWNCONV_EPI32_SINT8:
+ n := j*8
+ addr[n+7:n] := Int32ToSInt8(v[i+31:i])
+ _MM_DOWNCONV_EPI32_UINT16:
+ n := j*16
+ addr[n+15:n] := Int32ToUInt16(v[i+31:i])
+ _MM_DOWNCONV_EPI32_SINT16:
+ n := j*16
+ addr[n+15:n] := Int32ToSInt16(v[i+31:i])
+ ESAC
+ENDFOR
+ </operation>
+ <instruction name="VMOVDQA32" form="m512, zmm" xed="VMOVDQA32_MEMu32_MASKmskw_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_extstore_pd">
+ <type>Floating Point</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void *" varname="mt" etype="FP64"/>
+ <parameter type="__m512d" varname="v" etype="FP64"/>
+ <parameter type="_MM_DOWNCONV_PD_ENUM" varname="conv" etype="IMM" immtype="_MM_DOWNCONV_PD"/>
+ <parameter type="int" varname="hint" etype="UI32" hint="TRUE"/>
+ <description>Downconverts packed double-precision (64-bit) floating-point elements stored in "v" to a smaller type depending on "conv" and stores them in memory location "mt". "hint" indicates to the processor whether the data is non-temporal.</description>
+ <operation>
+addr := MEM[mt]
+FOR j := 0 to 7
+ i := j*64
+ CASE conv OF
+    _MM_DOWNCONV_PD_NONE:
+ addr[i+63:i] := v[i+63:i]
+ ESAC
+ENDFOR
+ </operation>
+ <instruction name="VMOVAPD" form="m512, zmm" xed="VMOVAPD_MEMf64_MASKmskw_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_extstore_epi64">
+ <type>Integer</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void *" varname="mt" etype="UI64"/>
+ <parameter type="__m512i" varname="v" etype="UI64"/>
+ <parameter type="_MM_DOWNCONV_EPI64_ENUM" varname="conv" etype="IMM" immtype="_MM_DOWNCONV_EPI64"/>
+ <parameter type="int" varname="hint" etype="UI32" hint="TRUE"/>
+ <description>Downconverts packed 64-bit integer elements stored in "v" to a smaller type depending on "conv" and stores them in memory location "mt". "hint" indicates to the processor whether the data is non-temporal.</description>
+ <operation>
+addr := MEM[mt]
+FOR j := 0 to 7
+ i := j*64
+ CASE conv OF
+ _MM_DOWNCONV_EPI64_NONE: addr[i+63:i] := v[i+63:i]
+ ESAC
+ENDFOR
+ </operation>
+ <instruction name="VMOVDQA64" form="m512, zmm" xed="VMOVDQA64_MEMu64_MASKmskw_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_mask_extstore_ps">
+ <type>Floating Point</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void *" varname="mt" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="v" etype="FP32"/>
+ <parameter type="_MM_DOWNCONV_PS_ENUM" varname="conv" etype="IMM" immtype="_MM_DOWNCONV_PS"/>
+ <parameter type="int" varname="hint" etype="UI32" hint="TRUE"/>
+ <description>Downconverts packed single-precision (32-bit) floating-point elements stored in "v" to a smaller type depending on "conv" and stores them in memory location "mt" using writemask "k" (elements are not written to memory when the corresponding mask bit is not set). "hint" indicates to the processor whether the data is non-temporal.</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ CASE conv OF
+ _MM_DOWNCONV_PS_NONE:
+ mt[i+31:i] := v[i+31:i]
+ _MM_DOWNCONV_PS_FLOAT16:
+ n := j*16
+ mt[n+15:n] := Convert_FP32_To_FP16(v[i+31:i])
+ _MM_DOWNCONV_PS_UINT8:
+ n := j*8
+ mt[n+7:n] := Convert_FP32_To_UInt8(v[i+31:i])
+ _MM_DOWNCONV_PS_SINT8:
+ n := j*8
+ mt[n+7:n] := Convert_FP32_To_Int8(v[i+31:i])
+ _MM_DOWNCONV_PS_UINT16:
+ n := j*16
+ mt[n+15:n] := Convert_FP32_To_UInt16(v[i+31:i])
+ _MM_DOWNCONV_PS_SINT16:
+ n := j*16
+ mt[n+15:n] := Convert_FP32_To_Int16(v[i+31:i])
+ ESAC
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VMOVAPS" form="m512 {k}, zmm" xed="VMOVAPS_MEMf32_MASKmskw_ZMMf32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_mask_extstore_pd">
+ <type>Floating Point</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void *" varname="mt" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="v" etype="FP64"/>
+ <parameter type="_MM_DOWNCONV_PD_ENUM" varname="conv" etype="IMM" immtype="_MM_DOWNCONV_PD"/>
+ <parameter type="int" varname="hint" etype="UI32" hint="TRUE"/>
+ <description>Downconverts packed double-precision (64-bit) floating-point elements stored in "v" to a smaller type depending on "conv" and stores them in memory location "mt" (elements in "mt" are unaltered when the corresponding mask bit is not set). "hint" indicates to the processor whether the data is non-temporal.</description>
+ <operation>
+addr := MEM[mt]
+FOR j := 0 to 7
+ i := j*64
+ CASE conv OF
+ _MM_DOWNCONV_PD_NONE:
+ IF k[j]
+ mt[i+63:i] := v[i+63:i]
+ FI
+ ESAC
+ENDFOR
+ </operation>
+ <instruction name="VMOVAPD" form="m512 {k}, zmm" xed="VMOVAPD_MEMf64_MASKmskw_ZMMf64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_mask_extstore_epi32">
+ <type>Integer</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void *" varname="mt" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="v" etype="UI32"/>
+ <parameter type="_MM_DOWNCONV_EPI32_ENUM" varname="conv" etype="IMM" immtype="_MM_DOWNCONV_EPI32"/>
+ <parameter type="int" varname="hint" etype="UI32" hint="TRUE"/>
+ <description>Downconverts packed 32-bit integer elements stored in "v" to a smaller type depending on "conv" and stores them in memory location "mt" (elements in "mt" are unaltered when the corresponding mask bit is not set). "hint" indicates to the processor whether the data is non-temporal.</description>
+ <operation>addr := MEM[mt]
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ CASE conv OF
+ _MM_DOWNCONV_EPI32_NONE:
+ addr[i+31:i] := v[i+31:i]
+ _MM_DOWNCONV_EPI32_UINT8:
+ n := j*8
+ addr[n+7:n] := Int32ToUInt8(v[i+31:i])
+ _MM_DOWNCONV_EPI32_SINT8:
+ n := j*8
+ addr[n+7:n] := Int32ToSInt8(v[i+31:i])
+ _MM_DOWNCONV_EPI32_UINT16:
+ n := j*16
+ addr[n+15:n] := Int32ToUInt16(v[i+31:i])
+ _MM_DOWNCONV_EPI32_SINT16:
+ n := j*16
+ addr[n+15:n] := Int32ToSInt16(v[i+31:i])
+ ESAC
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VMOVDQA32" form="m512 {k}, zmm" xed="VMOVDQA32_MEMu32_MASKmskw_ZMMu32_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_mask_extstore_epi64">
+ <type>Integer</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void *" varname="mt" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="v" etype="UI64"/>
+ <parameter type="_MM_DOWNCONV_EPI64_ENUM" varname="conv" etype="IMM" immtype="_MM_DOWNCONV_EPI64"/>
+ <parameter type="int" varname="hint" etype="UI32" hint="TRUE"/>
+ <description>Downconverts packed 64-bit integer elements stored in "v" to a smaller type depending on "conv" and stores them in memory location "mt" (elements in "mt" are unaltered when the corresponding mask bit is not set). "hint" indicates to the processor whether the data is non-temporal.</description>
+ <operation>
+addr := MEM[mt]
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ CASE conv OF
+ _MM_DOWNCONV_EPI64_NONE: addr[i+63:i] := v[i+63:i]
+ ESAC
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VMOVDQA64" form="m512 {k}, zmm" xed="VMOVDQA64_MEMu64_MASKmskw_ZMMu64_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_storenr_ps">
+ <type>Floating Point</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void *" varname="mt" etype="FP32" memwidth="512"/>
+ <parameter type="__m512" varname="v" etype="FP32"/>
+ <description>Stores packed single-precision (32-bit) floating-point elements from "v" to memory address "mt" with a no-read hint to the processor.</description>
+ <operation>
+addr := MEM[mt]
+FOR j := 0 to 15
+ i := j*32
+ addr[i+31:i] := v[i+31:i]
+ENDFOR
+ </operation>
+ <instruction name="VMOVNRAPS" form="m512, zmm"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_storenr_pd">
+ <type>Floating Point</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void *" varname="mt" etype="FP64" memwidth="512"/>
+ <parameter type="__m512d" varname="v" etype="FP64"/>
+ <description>Stores packed double-precision (64-bit) floating-point elements from "v" to memory address "mt" with a no-read hint to the processor.</description>
+ <operation>
+addr := MEM[mt]
+FOR j := 0 to 7
+ i := j*64
+ addr[i+63:i] := v[i+63:i]
+ENDFOR
+ </operation>
+ <instruction name="VMOVNRAPD" form="m512, zmm"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_storenrngo_ps">
+ <type>Floating Point</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void *" varname="mt" etype="FP32" memwidth="512"/>
+ <parameter type="__m512" varname="v" etype="FP32"/>
+ <description>Stores packed single-precision (32-bit) floating-point elements from "v" to memory address "mt" with a no-read hint and using a weakly-ordered memory consistency model (stores performed with this function are not globally ordered, and subsequent stores from the same thread can be observed before them).</description>
+ <operation>
+addr := MEM[mt]
+FOR j := 0 to 15
+ i := j*32
+ addr[i+31:i] := v[i+31:i]
+ENDFOR
+ </operation>
+ <instruction name="VMOVNRNGOAPS" form="m512, zmm"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_storenrngo_pd">
+ <type>Floating Point</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void *" varname="mt" etype="FP64" memwidth="512"/>
+ <parameter type="__m512d" varname="v" etype="FP64"/>
+ <description>Stores packed double-precision (64-bit) floating-point elements from "v" to memory address "mt" with a no-read hint and using a weakly-ordered memory consistency model (stores performed with this function are not globally ordered, and subsequent stores from the same thread can be observed before them).</description>
+ <operation>
+addr := MEM[mt]
+FOR j := 0 to 7
+ i := j*64
+ addr[i+63:i] := v[i+63:i]
+ENDFOR
+ </operation>
+ <instruction name="VMOVNRNGOAPD" form="m512, zmm"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_adc_epi32">
+ <type>Integer</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="v2" etype="UI32"/>
+ <parameter type="__mmask16" varname="k2" etype="MASK"/>
+ <parameter type="__m512i" varname="v3" etype="UI32"/>
+ <parameter type="__mmask16 *" varname="k2_res" etype="MASK" memwidth="16"/>
+ <description>Performs element-by-element addition of packed 32-bit integers in "v2" and "v3" and the corresponding bit in "k2", storing the result of the addition in "dst" and the result of the carry in "k2_res".</description>
+ <operation>FOR j := 0 to 15
+ i := j*32
+ k2_res[j] := Carry(v2[i+31:i] + v3[i+31:i] + k2[j])
+ dst[i+31:i] := v2[i+31:i] + v3[i+31:i] + k2[j]
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPADCD" form="zmm, k, zmm"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_mask_adc_epi32">
+ <type>Integer</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="v2" etype="UI32"/>
+ <parameter type="__mmask16" varname="k1" etype="MASK"/>
+ <parameter type="__mmask16" varname="k2" etype="MASK"/>
+ <parameter type="__m512i" varname="v3" etype="UI32"/>
+ <parameter type="__mmask16 *" varname="k2_res" etype="MASK" memwidth="16"/>
+ <description>Performs element-by-element addition of packed 32-bit integers in "v2" and "v3" and the corresponding bit in "k2", storing the result of the addition in "dst" and the result of the carry in "k2_res" using writemask "k1" (elements are copied from "v2" when the corresponding mask bit is not set).</description>
+ <operation>FOR j := 0 to 15
+ i := j*32
+ IF k1[j]
+ k2_res[j] := Carry(v2[i+31:i] + v3[i+31:i] + k2[j])
+ dst[i+31:i] := v2[i+31:i] + v3[i+31:i] + k2[j]
+ ELSE
+ dst[i+31:i] := v2[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPADCD" form="zmm {k}, k, zmm"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_addn_pd">
+ <type>Floating Point</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="v2" etype="FP64"/>
+ <parameter type="__m512d" varname="v3" etype="FP64"/>
+ <description>Performs element-by-element addition between packed double-precision (64-bit) floating-point elements in "v2" and "v3" and negates their sum, storing the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ dst[i+63:i] := -(v2[i+63:i] + v3[i+63:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VADDNPD" form="zmm, zmm, zmm"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_mask_addn_pd">
+ <type>Floating Point</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="v2" etype="FP64"/>
+ <parameter type="__m512d" varname="v3" etype="FP64"/>
+ <description>Performs element-by-element addition between packed double-precision (64-bit) floating-point elements in "v2" and "v3" and negates their sum, storing the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := -(v2[i+63:i] + v3[i+63:i])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VADDNPD" form="zmm {k}, zmm, zmm"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_addn_ps">
+ <type>Floating Point</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="v2" etype="FP32"/>
+ <parameter type="__m512" varname="v3" etype="FP32"/>
+ <description>Performs element-by-element addition between packed single-precision (32-bit) floating-point elements in "v2" and "v3" and negates their sum, storing the results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := -(v2[i+31:i] + v3[i+31:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VADDNPS" form="zmm, zmm, zmm"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_mask_addn_ps">
+ <type>Floating Point</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="v2" etype="FP32"/>
+ <parameter type="__m512" varname="v3" etype="FP32"/>
+ <description>Performs element-by-element addition between packed single-precision (32-bit) floating-point elements in "v2" and "v3" and negates their sum, storing the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := -(v2[i+31:i] + v3[i+31:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VADDNPS" form="zmm {k}, zmm, zmm"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_addn_round_pd">
+ <type>Floating Point</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="v2" etype="FP64"/>
+ <parameter type="__m512d" varname="v3" etype="FP64"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Performs element by element addition between packed double-precision (64-bit) floating-point elements in "v2" and "v3" and negates the sum, storing the result in "dst".
+ [round_note]</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ dst[i+63:i] := -(v2[i+63:i] + v3[i+63:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VADDNPD" form="zmm, zmm, zmm"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_mask_addn_round_pd">
+ <type>Floating Point</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="v2" etype="FP64"/>
+ <parameter type="__m512d" varname="v3" etype="FP64"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Performs element by element addition between packed double-precision (64-bit) floating-point elements in "v2" and "v3" and negates the sum, storing the result in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+ [round_note]</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := -(v2[i+63:i] + v3[i+63:i])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VADDNPD" form="zmm {k}, zmm, zmm"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_addn_round_ps">
+ <type>Floating Point</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="v2" etype="FP32"/>
+ <parameter type="__m512" varname="v3" etype="FP32"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Performs element by element addition between packed single-precision (32-bit) floating-point elements in "v2" and "v3" and negates the sum, storing the result in "dst".
+ [round_note]</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := -(v2[i+31:i] + v3[i+31:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VADDNPS" form="zmm, zmm, zmm"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_mask_addn_round_ps">
+ <type>Floating Point</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="v2" etype="FP32"/>
+ <parameter type="__m512" varname="v3" etype="FP32"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Performs element by element addition between packed single-precision (32-bit) floating-point elements in "v2" and "v3" and negates the sum, storing the result in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+ [round_note]</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := -(v2[i+31:i] + v3[i+31:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VADDNPS" form="zmm {k}, zmm, zmm"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_subr_pd">
+ <type>Floating Point</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="v2" etype="FP64"/>
+ <parameter type="__m512d" varname="v3" etype="FP64"/>
+ <description>Performs element-by-element subtraction of packed double-precision (64-bit) floating-point elements in "v2" from "v3" storing the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ dst[i+63:i] := v3[i+63:i] - v2[i+63:i]
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VSUBRPD" form="zmm, zmm, zmm"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_mask_subr_pd">
+ <type>Floating Point</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="v2" etype="FP64"/>
+ <parameter type="__m512d" varname="v3" etype="FP64"/>
+ <description>Performs element-by-element subtraction of packed double-precision (64-bit) floating-point elements in "v2" from "v3" storing the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := v3[i+63:i] - v2[i+63:i]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VSUBRPD" form="zmm {k}, zmm, zmm"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_subr_ps">
+ <type>Floating Point</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="v2" etype="FP32"/>
+ <parameter type="__m512" varname="v3" etype="FP32"/>
+ <description>Performs element-by-element subtraction of packed single-precision (32-bit) floating-point elements in "v2" from "v3" storing the results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := v3[i+31:i] - v2[i+31:i]
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VSUBRPS" form="zmm, zmm, zmm"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_mask_subr_ps">
+ <type>Floating Point</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="v2" etype="FP32"/>
+ <parameter type="__m512" varname="v3" etype="FP32"/>
+ <description>Performs element-by-element subtraction of packed single-precision (32-bit) floating-point elements in "v2" from "v3" storing the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := v3[i+31:i] - v2[i+31:i]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VSUBRPS" form="zmm {k}, zmm, zmm"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_subr_round_pd">
+ <type>Floating Point</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="v2" etype="FP64"/>
+ <parameter type="__m512d" varname="v3" etype="FP64"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Performs element-by-element subtraction of packed double-precision (64-bit) floating-point elements in "v2" from "v3" storing the results in "dst".
+ [round_note]</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ dst[i+63:i] := v3[i+63:i] - v2[i+63:i]
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VSUBRPD" form="zmm, zmm, zmm"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_mask_subr_round_pd">
+ <type>Floating Point</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="v2" etype="FP64"/>
+ <parameter type="__m512d" varname="v3" etype="FP64"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Performs element-by-element subtraction of packed double-precision (64-bit) floating-point elements in "v2" from "v3" storing the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+ [round_note]</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := v3[i+63:i] - v2[i+63:i]
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VSUBRPD" form="zmm {k}, zmm, zmm"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_subr_round_ps">
+ <type>Floating Point</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="v2" etype="FP32"/>
+ <parameter type="__m512" varname="v3" etype="FP32"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Performs element-by-element subtraction of packed single-precision (32-bit) floating-point elements in "v2" from "v3" storing the results in "dst".
+ [round_note]</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := v3[i+31:i] - v2[i+31:i]
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VSUBRPS" form="zmm, zmm, zmm"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_mask_subr_round_ps">
+ <type>Floating Point</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="v2" etype="FP32"/>
+ <parameter type="__m512" varname="v3" etype="FP32"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Performs element-by-element subtraction of packed single-precision (32-bit) floating-point elements in "v2" from "v3" storing the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+ [round_note]</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := v3[i+31:i] - v2[i+31:i]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VSUBRPS" form="zmm {k}, zmm, zmm"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_subr_epi32">
+ <type>Integer</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="v2" etype="UI32"/>
+ <parameter type="__m512i" varname="v3" etype="UI32"/>
+ <description>Performs element-by-element subtraction of packed 32-bit integer elements in "v2" from "v3" storing the results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := v3[i+31:i] - v2[i+31:i]
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSUBRD" form="zmm, zmm, zmm"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_mask_subr_epi32">
+ <type>Integer</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="src" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="v2" etype="UI32"/>
+ <parameter type="__m512i" varname="v3" etype="UI32"/>
+ <description>Performs element-by-element subtraction of packed 32-bit integer elements in "v2" from "v3" storing the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set)</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := v3[i+31:i] - v2[i+31:i]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSUBRD" form="zmm {k}, zmm, zmm"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_addsetc_epi32">
+ <type>Integer</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="v2" etype="UI32"/>
+ <parameter type="__m512i" varname="v3" etype="UI32"/>
+ <parameter type="__mmask16 *" varname="k2_res" etype="MASK" memwidth="16"/>
+ <description>Performs element-by-element addition of packed 32-bit integer elements in "v2" and "v3", storing the resultant carry in "k2_res" (carry flag) and the addition results in "dst".</description>
+ <operation>FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := v2[i+31:i] + v3[i+31:i]
+ k2_res[j] := Carry(v2[i+31:i] + v3[i+31:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPADDSETCD" form="zmm, k, zmm"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_mask_addsetc_epi32">
+ <type>Integer</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="v2" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__mmask16" varname="k_old" etype="MASK"/>
+ <parameter type="__m512i" varname="v3" etype="UI32"/>
+ <parameter type="__mmask16 *" varname="k2_res" etype="MASK" memwidth="16"/>
+ <description>Performs element-by-element addition of packed 32-bit integer elements in "v2" and "v3", storing the resultant carry in "k2_res" (carry flag) and the addition results in "dst" using writemask "k" (elements are copied from "v2" and "k_old" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := v2[i+31:i] + v3[i+31:i]
+ ELSE
+ dst[i+31:i] := v2[i+31:i]
+ k2_res[j] := k_old[j]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPADDSETCD" form="zmm {k}, k, zmm"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_addsets_epi32">
+ <type>Integer</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="v2" etype="UI32"/>
+ <parameter type="__m512i" varname="v3" etype="UI32"/>
+ <parameter type="__mmask16 *" varname="sign" etype="MASK" memwidth="16"/>
+ <description>Performs an element-by-element addition of packed 32-bit integer elements in "v2" and "v3", storing the results in "dst" and the sign of the sum in "sign" (sign flag).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := v2[i+31:i] + v3[i+31:i]
+ sign[j] := v2[i+31:i] &amp; v3[i+31:i] &amp; 0x80000000
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPADDSETSD" form="zmm, zmm, zmm"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_mask_addsets_epi32">
+ <type>Integer</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="src" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="v2" etype="UI32"/>
+ <parameter type="__m512i" varname="v3" etype="UI32"/>
+ <parameter type="__mmask16 *" varname="sign" etype="MASK" memwidth="16"/>
+ <description>Performs an element-by-element addition of packed 32-bit integer elements in "v2" and "v3", storing the results in "dst" and the sign of the sum in "sign" (sign flag). Results are stored using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := v2[i+31:i] + v3[i+31:i]
+ sign[j] := v2[i+31:i] &amp; v3[i+31:i] &amp; 0x80000000
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPADDSETSD" form="zmm {k}, zmm, zmm"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_addsets_ps">
+ <type>Floating Point</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="v2" etype="FP32"/>
+ <parameter type="__m512" varname="v3" etype="FP32"/>
+ <parameter type="__mmask16 *" varname="sign" etype="MASK" memwidth="16"/>
+ <description>Performs an element-by-element addition of packed single-precision (32-bit) floating-point elements in "v2" and "v3", storing the results in "dst" and the sign of the sum in "sign" (sign flag).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := v2[i+31:i] + v3[i+31:i]
+ sign[j] := v2[i+31:i] &amp; v3[i+31:i] &amp; 0x80000000
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VADDSETSPS" form="zmm, zmm, zmm"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_mask_addsets_ps">
+ <type>Floating Point</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="v2" etype="FP32"/>
+ <parameter type="__m512" varname="v3" etype="FP32"/>
+ <parameter type="__mmask16 *" varname="sign" etype="MASK" memwidth="16"/>
+ <description>Performs an element-by-element addition of packed single-precision (32-bit) floating-point elements in "v2" and "v3", storing the results in "dst" and the sign of the sum in "sign" (sign flag). Results are stored using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := v2[i+31:i] + v3[i+31:i]
+ sign[j] := v2[i+31:i] &amp; v3[i+31:i] &amp; 0x80000000
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VADDSETSPS" form="zmm {k}, zmm, zmm"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_addsets_round_ps">
+ <type>Floating Point</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="v2" etype="FP32"/>
+ <parameter type="__m512" varname="v3" etype="FP32"/>
+ <parameter type="__mmask16 *" varname="sign" etype="MASK" memwidth="16"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Performs an element-by-element addition of packed single-precision (32-bit) floating-point elements in "v2" and "v3", storing the results in "dst" and the sign of the sum in "sign" (sign flag).
+ [round_note]</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := v2[i+31:i] + v3[i+31:i]
+ sign[j] := v2[i+31:i] &amp; v3[i+31:i] &amp; 0x80000000
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VADDSETSPS" form="zmm, zmm, zmm"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_mask_addsets_round_ps">
+ <type>Floating Point</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="v2" etype="FP32"/>
+ <parameter type="__m512" varname="v3" etype="FP32"/>
+ <parameter type="__mmask16 *" varname="sign" etype="MASK" memwidth="16"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Performs an element-by-element addition of packed single-precision (32-bit) floating-point elements in "v2" and "v3", storing the results in "dst" and the sign of the sum in "sign" (sign flag). Results are stored using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+ [round_note]</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := v2[i+31:i] + v3[i+31:i]
+ sign[j] := v2[i+31:i] &amp; v3[i+31:i] &amp; 0x80000000
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VADDSETSPS" form="zmm {k}, zmm, zmm"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_subsetb_epi32">
+ <type>Integer</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="v2" etype="UI32"/>
+ <parameter type="__m512i" varname="v3" etype="UI32"/>
+ <parameter type="__mmask16 *" varname="borrow" etype="MASK" memwidth="16"/>
+ <description>Performs element-by-element subtraction of packed 32-bit integer elements in "v3" from "v2", storing the results in "dst" and the nth borrow bit in the nth position of "borrow" (borrow flag).</description>
+ <operation>FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := v2[i+31:i] - v3[i+31:i]
+ borrow[j] := Borrow(v2[i+31:i] - v3[i+31:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSUBSETBD" form="zmm, k, zmm"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_mask_subsetb_epi32">
+ <type>Integer</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="v2" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__mmask16" varname="k_old" etype="MASK"/>
+ <parameter type="__m512i" varname="v3" etype="UI32"/>
+ <parameter type="__mmask16 *" varname="borrow" etype="MASK" memwidth="16"/>
+ <description>Performs element-by-element subtraction of packed 32-bit integer elements in "v3" from "v2", storing the results in "dst" and the nth borrow bit in the nth position of "borrow" (borrow flag). Results are stored using writemask "k" (elements are copied from "v2" and "k_old" when the corresponding mask bit is not set).</description>
+ <operation>FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := v2[i+31:i] - v3[i+31:i]
+ borrow[j] := Borrow(v2[i+31:i] - v3[i+31:i])
+ ELSE
+    dst[i+31:i] := v2[i+31:i]
+ borrow[j] := k_old[j]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSUBSETBD" form="zmm {k}, k, zmm"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_subrsetb_epi32">
+ <type>Integer</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="v2" etype="UI32"/>
+ <parameter type="__m512i" varname="v3" etype="UI32"/>
+ <parameter type="__mmask16 *" varname="borrow" etype="MASK" memwidth="16"/>
+ <description>Performs element-by-element subtraction of packed 32-bit integer elements in "v2" from "v3", storing the results in "dst" and "v2". The borrowed value from the subtraction difference for the nth element is written to the nth bit of "borrow" (borrow flag).</description>
+ <operation>FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := v3[i+31:i] - v2[i+31:i]
+ borrow[j] := Borrow(v3[i+31:i] - v2[i+31:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSUBRSETBD" form="zmm, k, zmm"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_mask_subrsetb_epi32">
+ <type>Integer</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="v2" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__mmask16" varname="k_old" etype="MASK"/>
+ <parameter type="__m512i" varname="v3" etype="UI32"/>
+ <parameter type="__mmask16 *" varname="borrow" etype="MASK" memwidth="16"/>
+  <description>Performs element-by-element subtraction of packed 32-bit integer elements in "v2" from "v3", storing the results in "dst" and "v2". The borrowed value from the subtraction difference for the nth element is written to the nth bit of "borrow" (borrow flag). Results are written using writemask "k" (elements of "borrow" are copied from "k_old" when the corresponding mask bit is not set).</description>
+ <operation>FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ diff := v3[i+31:i] - v2[i+31:i]
+ borrow[j] := Borrow(v3[i+31:i] - v2[i+31:i])
+ dst[i+31:i] := diff
+ v2[i+31:i] := diff
+ ELSE
+ borrow[j] := k_old[j]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSUBRSETBD" form="zmm {k}, k, zmm"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_sbb_epi32">
+ <type>Integer</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="v2" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="v3" etype="UI32"/>
+ <parameter type="__mmask16 *" varname="borrow" etype="MASK" memwidth="16"/>
+ <description>Performs element-by-element three-input subtraction of packed 32-bit integer elements of "v3" as well as the corresponding bit from "k" from "v2". The borrowed value from the subtraction difference for the nth element is written to the nth bit of "borrow" (borrow flag). Results are stored in "dst".</description>
+ <operation>FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := v2[i+31:i] - v3[i+31:i] - k[j]
+ borrow[j] := Borrow(v2[i+31:i] - v3[i+31:i] - k[j])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSBBD" form="zmm, k, zmm"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_mask_sbb_epi32">
+ <type>Integer</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="v2" etype="UI32"/>
+ <parameter type="__mmask16" varname="k1" etype="MASK"/>
+ <parameter type="__mmask16" varname="k2" etype="MASK"/>
+ <parameter type="__m512i" varname="v3" etype="UI32"/>
+ <parameter type="__mmask16 *" varname="borrow" etype="MASK" memwidth="16"/>
+ <description>Performs element-by-element three-input subtraction of packed 32-bit integer elements of "v3" as well as the corresponding bit from "k2" from "v2". The borrowed value from the subtraction difference for the nth element is written to the nth bit of "borrow" (borrow flag). Results are stored in "dst" using writemask "k1" (elements are copied from "v2" when the corresponding mask bit is not set).</description>
+ <operation>FOR j := 0 to 15
+ i := j*32
+ IF k1[j]
+ dst[i+31:i] := v2[i+31:i] - v3[i+31:i] - k2[j]
+ borrow[j] := Borrow(v2[i+31:i] - v3[i+31:i] - k2[j])
+ ELSE
+ dst[i+31:i] := v2[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSBBD" form="zmm {k}, k, zmm"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_sbbr_epi32">
+ <type>Integer</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="v2" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="v3" etype="UI32"/>
+ <parameter type="__mmask16 *" varname="borrow" etype="MASK" memwidth="16"/>
+ <description>Performs element-by-element three-input subtraction of packed 32-bit integer elements of "v2" as well as the corresponding bit from "k" from "v3". The borrowed value from the subtraction difference for the nth element is written to the nth bit of "borrow" (borrow flag). Results are stored in "dst".</description>
+ <operation>FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := v3[i+31:i] - v2[i+31:i] - k[j]
+  borrow[j] := Borrow(v3[i+31:i] - v2[i+31:i] - k[j])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSBBRD" form="zmm, k, zmm"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_mask_sbbr_epi32">
+ <type>Integer</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="v2" etype="UI32"/>
+ <parameter type="__mmask16" varname="k1" etype="MASK"/>
+ <parameter type="__mmask16" varname="k2" etype="MASK"/>
+ <parameter type="__m512i" varname="v3" etype="UI32"/>
+ <parameter type="__mmask16 *" varname="borrow" etype="MASK" memwidth="16"/>
+ <description>Performs element-by-element three-input subtraction of packed 32-bit integer elements of "v2" as well as the corresponding bit from "k2" from "v3". The borrowed value from the subtraction difference for the nth element is written to the nth bit of "borrow" (borrow flag). Results are stored in "dst" using writemask "k1" (elements are copied from "v2" when the corresponding mask bit is not set).</description>
+ <operation>FOR j := 0 to 15
+ i := j*32
+ IF k1[j]
+ dst[i+31:i] := v3[i+31:i] - v2[i+31:i] - k2[j]
+    borrow[j] := Borrow(v3[i+31:i] - v2[i+31:i] - k2[j])
+ ELSE
+ dst[i+31:i] := v2[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPSBBRD" form="zmm {k}, k, zmm"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_cvt_roundpd_pslo">
+ <type>Floating Point</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Convert</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512d" varname="v2" etype="FP64"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Performs element-by-element conversion of packed double-precision (64-bit) floating-point elements in "v2" to packed single-precision (32-bit) floating-point elements, storing the results in "dst". Results are written to the lower half of "dst", and the upper half locations are set to '0'.
+ [round_note]</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ k := j*32
+ dst[k+31:k] := Convert_FP64_To_FP32(v2[i+63:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VCVTPD2PS" form="zmm, zmm"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_mask_cvt_roundpd_pslo">
+ <type>Floating Point</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Convert</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="v2" etype="FP64"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Performs element-by-element conversion of packed double-precision (64-bit) floating-point elements in "v2" to packed single-precision (32-bit) floating-point elements, storing the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). Results are written to the lower half of "dst", and the upper half locations are set to '0'.
+ [round_note]</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ l := j*32
+ IF k[j]
+ dst[l+31:l] := Convert_FP64_To_FP32(v2[i+63:i])
+ ELSE
+ dst[l+31:l] := src[l+31:l]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VCVTPD2PS" form="zmm {k}, zmm"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_cvtfxpnt_roundpd_epu32lo">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Convert</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512d" varname="v2" etype="FP64"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Performs element-by-element conversion of packed double-precision (64-bit) floating-point elements in "v2" to packed 32-bit unsigned integer elements, storing the results in "dst". Results are written to the lower half of "dst", and the upper half locations are set to '0'.
+ [round_note]</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ k := j*32
+  dst[k+31:k] := Convert_FP64_To_UInt32(v2[i+63:i])
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VCVTFXPNTPD2UDQ" form="zmm, zmm, imm8"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_mask_cvtfxpnt_roundpd_epu32lo">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Convert</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="v2" etype="FP64"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Performs element-by-element conversion of packed double-precision (64-bit) floating-point elements in "v2" to packed 32-bit unsigned integer elements, storing the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). Results are written to the lower half of "dst", and the upper half locations are set to '0'.
+ [round_note]</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ l := j*32
+ IF k[j]
+    dst[l+31:l] := Convert_FP64_To_UInt32(v2[i+63:i])
+ ELSE
+ dst[l+31:l] := src[l+31:l]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VCVTFXPNTPD2UDQ" form="zmm {k}, zmm, imm8"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_cvtfxpnt_round_adjustps_epi32">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Convert</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512" varname="v2" etype="FP32"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <parameter type="_MM_EXP_ADJ_ENUM" varname="expadj" etype="UI32"/>
+ <description>Performs element-by-element conversion of packed single-precision (32-bit) floating-point elements in "v2" to packed 32-bit integer elements and performs an optional exponent adjust using "expadj", storing the results in "dst".
+ [round_note]</description>
+ <operation>FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := v2[i+31:i]
+ CASE expadj OF
+    _MM_EXPADJ_NONE: dst[i+31:i] := dst[i+31:i] * (1 &lt;&lt; 0)
+    _MM_EXPADJ_4: dst[i+31:i] := dst[i+31:i] * (1 &lt;&lt; 4)
+    _MM_EXPADJ_5: dst[i+31:i] := dst[i+31:i] * (1 &lt;&lt; 5)
+    _MM_EXPADJ_8: dst[i+31:i] := dst[i+31:i] * (1 &lt;&lt; 8)
+    _MM_EXPADJ_16: dst[i+31:i] := dst[i+31:i] * (1 &lt;&lt; 16)
+    _MM_EXPADJ_24: dst[i+31:i] := dst[i+31:i] * (1 &lt;&lt; 24)
+    _MM_EXPADJ_31: dst[i+31:i] := dst[i+31:i] * (1 &lt;&lt; 31)
+    _MM_EXPADJ_32: dst[i+31:i] := dst[i+31:i] * (1 &lt;&lt; 32)
+ ESAC
+ dst[i+31:i] := Float32ToInt32(dst[i+31:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCVTFXPNTPS2DQ" form="zmm, zmm, imm8"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_cvtfxpnt_round_adjustps_epu32">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Convert</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512" varname="v2" etype="FP32"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <parameter type="_MM_EXP_ADJ_ENUM" varname="expadj" etype="UI32"/>
+ <description>Performs element-by-element conversion of packed single-precision (32-bit) floating-point elements in "v2" to packed 32-bit unsigned integer elements and performing an optional exponent adjust using "expadj", storing the results in "dst".
+ [round_note]</description>
+ <operation>FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := v2[i+31:i]
+ CASE expadj OF
+ _MM_EXPADJ_NONE: dst[i+31:i] := dst[i+31:i] * (1 &lt;&lt; 0)
+ _MM_EXPADJ_4: dst[i+31:i] := dst[i+31:i] * (1 &lt;&lt; 4)
+ _MM_EXPADJ_5: dst[i+31:i] := dst[i+31:i] * (1 &lt;&lt; 5)
+ _MM_EXPADJ_8: dst[i+31:i] := dst[i+31:i] * (1 &lt;&lt; 8)
+ _MM_EXPADJ_16: dst[i+31:i] := dst[i+31:i] * (1 &lt;&lt; 16)
+ _MM_EXPADJ_24: dst[i+31:i] := dst[i+31:i] * (1 &lt;&lt; 24)
+ _MM_EXPADJ_31: dst[i+31:i] := dst[i+31:i] * (1 &lt;&lt; 31)
+ _MM_EXPADJ_32: dst[i+31:i] := dst[i+31:i] * (1 &lt;&lt; 32)
+ ESAC
+ dst[i+31:i] := Float32ToUInt32(dst[i+31:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCVTFXPNTPS2UDQ" form="zmm, zmm, imm8"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_cvtfxpnt_round_adjustepu32_ps">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Convert</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512i" varname="v2" etype="UI32"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <parameter type="_MM_EXP_ADJ_ENUM" varname="expadj" etype="UI32"/>
+ <description>Performs element-by-element conversion of packed 32-bit unsigned integer elements in "v2" to packed single-precision (32-bit) floating-point elements and performing an optional exponent adjust using "expadj", storing the results in "dst".
+ [round_note]</description>
+ <operation>FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := UInt32ToFloat32(v2[i+31:i])
+ CASE expadj OF
+ _MM_EXPADJ_NONE: dst[i+31:i] := dst[i+31:i] * (1 &lt;&lt; 0)
+ _MM_EXPADJ_4: dst[i+31:i] := dst[i+31:i] * (1 &lt;&lt; 4)
+ _MM_EXPADJ_5: dst[i+31:i] := dst[i+31:i] * (1 &lt;&lt; 5)
+ _MM_EXPADJ_8: dst[i+31:i] := dst[i+31:i] * (1 &lt;&lt; 8)
+ _MM_EXPADJ_16: dst[i+31:i] := dst[i+31:i] * (1 &lt;&lt; 16)
+ _MM_EXPADJ_24: dst[i+31:i] := dst[i+31:i] * (1 &lt;&lt; 24)
+ _MM_EXPADJ_31: dst[i+31:i] := dst[i+31:i] * (1 &lt;&lt; 31)
+ _MM_EXPADJ_32: dst[i+31:i] := dst[i+31:i] * (1 &lt;&lt; 32)
+ ESAC
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCVTFXPNTUDQ2PS" form="zmm, zmm, imm8"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_mask_cvtfxpnt_round_adjustepu32_ps">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Convert</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="v2" etype="UI32"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <parameter type="_MM_EXP_ADJ_ENUM" varname="expadj" etype="UI32"/>
+ <description>Performs element-by-element conversion of packed 32-bit unsigned integer elements in "v2" to packed single-precision (32-bit) floating-point elements and performing an optional exponent adjust using "expadj", storing the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+ [round_note]</description>
+ <operation>FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := UInt32ToFloat32(v2[i+31:i])
+ CASE expadj OF
+ _MM_EXPADJ_NONE: dst[i+31:i] := dst[i+31:i] * (1 &lt;&lt; 0)
+ _MM_EXPADJ_4: dst[i+31:i] := dst[i+31:i] * (1 &lt;&lt; 4)
+ _MM_EXPADJ_5: dst[i+31:i] := dst[i+31:i] * (1 &lt;&lt; 5)
+ _MM_EXPADJ_8: dst[i+31:i] := dst[i+31:i] * (1 &lt;&lt; 8)
+ _MM_EXPADJ_16: dst[i+31:i] := dst[i+31:i] * (1 &lt;&lt; 16)
+ _MM_EXPADJ_24: dst[i+31:i] := dst[i+31:i] * (1 &lt;&lt; 24)
+ _MM_EXPADJ_31: dst[i+31:i] := dst[i+31:i] * (1 &lt;&lt; 31)
+ _MM_EXPADJ_32: dst[i+31:i] := dst[i+31:i] * (1 &lt;&lt; 32)
+ ESAC
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCVTFXPNTUDQ2PS" form="zmm {k}, zmm, imm8"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_exp223_ps">
+ <type>Floating Point</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512i" varname="v2" etype="UI32"/>
+ <description>Approximates the base-2 exponent of the packed single-precision (32-bit) floating-point elements in "v2" with eight bits for sign and magnitude and 24 bits for the fractional part. Results are stored in "dst".</description>
+ <operation>FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := exp223(v2[i+31:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VEXP223PS" form="zmm, zmm"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_mask_exp223_ps">
+ <type>Floating Point</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="v2" etype="UI32"/>
+ <description>Approximates the base-2 exponent of the packed single-precision (32-bit) floating-point elements in "v2" with eight bits for sign and magnitude and 24 bits for the fractional part. Results are stored in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := exp223(v2[i+31:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VEXP223PS" form="zmm {k}, zmm"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_fixupnan_pd">
+ <type>Floating Point</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="v1" etype="FP64"/>
+ <parameter type="__m512d" varname="v2" etype="FP64"/>
+ <parameter type="__m512i" varname="v3" etype="UI64"/>
+ <description>Fixes up NaN's from packed double-precision (64-bit) floating-point elements in "v1" and "v2", storing the results in "dst" and storing the quietized NaN's from "v1" in "v3".</description>
+ <operation>FOR j := 0 to 7
+ i := j*64
+ dst[i+63:i] := FixupNaNs(v1[i+63:i], v2[i+63:i])
+ v3[i+63:i] := QuietizeNaNs(v1[i+63:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VFIXUPNANPD" form="zmm, zmm, zmm"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_mask_fixupnan_pd">
+ <type>Floating Point</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="v1" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="v2" etype="FP64"/>
+ <parameter type="__m512i" varname="v3" etype="UI64"/>
+ <description>Fixes up NaN's from packed double-precision (64-bit) floating-point elements in "v1" and "v2", storing the results in "dst" using writemask "k" (only elements whose corresponding mask bit is set are used in the computation). Quietized NaN's from "v1" are stored in "v3".</description>
+ <operation>FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := FixupNaNs(v1[i+63:i], v2[i+63:i])
+ v3[i+63:i] := QuietizeNaNs(v1[i+63:i])
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VFIXUPNANPD" form="zmm {k}, zmm, zmm"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_fixupnan_ps">
+ <type>Floating Point</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="v1" etype="FP32"/>
+ <parameter type="__m512" varname="v2" etype="FP32"/>
+ <parameter type="__m512i" varname="v3" etype="UI32"/>
+ <description>Fixes up NaN's from packed single-precision (32-bit) floating-point elements in "v1" and "v2", storing the results in "dst" and storing the quietized NaN's from "v1" in "v3".</description>
+ <operation>FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := FixupNaNs(v1[i+31:i], v2[i+31:i])
+ v3[i+31:i] := QuietizeNaNs(v1[i+31:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VFIXUPNANPS" form="zmm, zmm, zmm"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_mask_fixupnan_ps">
+ <type>Floating Point</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="v1" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="v2" etype="FP32"/>
+ <parameter type="__m512i" varname="v3" etype="UI32"/>
+ <description>Fixes up NaN's from packed single-precision (32-bit) floating-point elements in "v1" and "v2", storing the results in "dst" using writemask "k" (only elements whose corresponding mask bit is set are used in the computation). Quietized NaN's from "v1" are stored in "v3".</description>
+ <operation>FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := FixupNaNs(v1[i+31:i], v2[i+31:i])
+ v3[i+31:i] := QuietizeNaNs(v1[i+31:i])
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VFIXUPNANPS" form="zmm {k}, zmm, zmm"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_extloadunpackhi_epi32">
+ <type>Integer</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Load</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="src" etype="UI32"/>
+ <parameter type="void const *" varname="mt" etype="UI32"/>
+ <parameter type="_MM_UPCONV_EPI32_ENUM" varname="conv" etype="IMM" immtype="_MM_UPCONV_EPI32"/>
+ <parameter type="int" varname="hint" etype="UI32" hint="TRUE"/>
+ <description>Loads the high-64-byte-aligned portion of the byte/word/doubleword stream starting at element-aligned address mt-64, up-converted depending on the value of "conv", and expanded into packed 32-bit integers in "dst". The initial values of "dst" are copied from "src". Only those converted doublewords that occur at or after the first 64-byte-aligned address following (mt-64) are loaded. Elements in the resulting vector that do not map to those doublewords are taken from "src". "hint" indicates to the processor whether the loaded data is non-temporal.</description>
+ <operation>DEFINE UPCONVERT(addr, offset, convertTo) {
+ CASE conv OF
+ _MM_UPCONV_EPI32_NONE:
+ RETURN MEM[addr + 4*offset]
+ _MM_UPCONV_EPI32_UINT8:
+ RETURN ZeroExtend32(MEM[addr + offset])
+ _MM_UPCONV_EPI32_SINT8:
+ RETURN SignExtend32(MEM[addr + offset])
+ _MM_UPCONV_EPI32_UINT16:
+ RETURN ZeroExtend32(MEM[addr + 2*offset])
+ _MM_UPCONV_EPI32_SINT16:
+ RETURN SignExtend32(MEM[addr + 2*offset])
+ ESAC
+}
+DEFINE UPCONVERTSIZE(convertTo) {
+ CASE conv OF
+ _MM_UPCONV_EPI32_NONE:
+ RETURN 4
+ _MM_UPCONV_EPI32_UINT8:
+ RETURN 1
+ _MM_UPCONV_EPI32_SINT8:
+ RETURN 1
+ _MM_UPCONV_EPI32_UINT16:
+ RETURN 2
+ _MM_UPCONV_EPI32_SINT16:
+ RETURN 2
+ ESAC
+}
+dst[511:0] := src[511:0]
+loadOffset := 0
+foundNext64BytesBoundary := false
+upSize := UPCONVERTSIZE(conv)
+addr := mt-64
+FOR j := 0 to 15
+ IF foundNext64BytesBoundary == false
+ IF (addr + (loadOffset + 1)*upSize % 64) == 0
+ foundNext64BytesBoundary := true
+ FI
+ ELSE
+ i := j*32
+ dst[i+31:i] := UPCONVERT(addr, loadOffset, conv)
+ FI
+ loadOffset := loadOffset + 1
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VLOADUNPACKHD" form="zmm, m512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_mask_extloadunpackhi_epi32">
+ <type>Integer</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Load</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="src" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="void const *" varname="mt" etype="UI32"/>
+ <parameter type="_MM_UPCONV_EPI32_ENUM" varname="conv" etype="IMM" immtype="_MM_UPCONV_EPI32"/>
+ <parameter type="int" varname="hint" etype="UI32" hint="TRUE"/>
+ <description>Loads the high-64-byte-aligned portion of the byte/word/doubleword stream starting at element-aligned address mt-64, up-converted depending on the value of "conv", and expanded into packed 32-bit integers in "dst". The initial values of "dst" are copied from "src". Only those converted doublewords that occur at or after the first 64-byte-aligned address following (mt-64) are loaded. Elements in the resulting vector that do not map to those doublewords are taken from "src". "hint" indicates to the processor whether the loaded data is non-temporal. Elements are copied to "dst" according to element selector "k" (elements are skipped when the corresponding mask bit is not set).</description>
+ <operation>DEFINE UPCONVERT(addr, offset, convertTo) {
+ CASE conv OF
+ _MM_UPCONV_EPI32_NONE:
+ RETURN MEM[addr + 4*offset]
+ _MM_UPCONV_EPI32_UINT8:
+ RETURN ZeroExtend32(MEM[addr + offset])
+ _MM_UPCONV_EPI32_SINT8:
+ RETURN SignExtend32(MEM[addr + offset])
+ _MM_UPCONV_EPI32_UINT16:
+ RETURN ZeroExtend32(MEM[addr + 2*offset])
+ _MM_UPCONV_EPI32_SINT16:
+ RETURN SignExtend32(MEM[addr + 2*offset])
+ ESAC
+}
+DEFINE UPCONVERTSIZE(convertTo) {
+ CASE conv OF
+ _MM_UPCONV_EPI32_NONE:
+ RETURN 4
+ _MM_UPCONV_EPI32_UINT8:
+ RETURN 1
+ _MM_UPCONV_EPI32_SINT8:
+ RETURN 1
+ _MM_UPCONV_EPI32_UINT16:
+ RETURN 2
+ _MM_UPCONV_EPI32_SINT16:
+ RETURN 2
+ ESAC
+}
+dst[511:0] := src[511:0]
+loadOffset := 0
+foundNext64BytesBoundary := false
+upSize := UPCONVERTSIZE(conv)
+addr := mt-64
+FOR j := 0 to 15
+ IF k[j]
+ IF foundNext64BytesBoundary == false
+ IF (addr + (loadOffset + 1)*upSize % 64) == 0
+ foundNext64BytesBoundary := true
+ FI
+ ELSE
+ i := j*32
+ dst[i+31:i] := UPCONVERT(addr, loadOffset, conv)
+ FI
+ loadOffset := loadOffset + 1
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VLOADUNPACKHD" form="zmm {k}, m512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_extloadunpacklo_epi32">
+ <type>Integer</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Load</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="src" etype="UI32"/>
+ <parameter type="void const *" varname="mt" etype="UI32"/>
+ <parameter type="_MM_UPCONV_EPI32_ENUM" varname="conv" etype="IMM" immtype="_MM_UPCONV_EPI32"/>
+ <parameter type="int" varname="hint" etype="UI32" hint="TRUE"/>
+ <description>Loads the low-64-byte-aligned portion of the byte/word/doubleword stream starting at element-aligned address mt, up-converted depending on the value of "conv", and expanded into packed 32-bit integers in "dst". The initial values of "dst" are copied from "src". Only those converted doublewords that occur before first 64-byte-aligned address following "mt" are loaded. Elements in the resulting vector that do not map to those doublewords are taken from "src". "hint" indicates to the processor whether the loaded data is non-temporal.</description>
+ <operation>
+DEFINE UPCONVERT(addr, offset, convertTo) {
+ CASE conv OF
+ _MM_UPCONV_EPI32_NONE:
+ RETURN MEM[addr + 4*offset]
+ _MM_UPCONV_EPI32_UINT8:
+ RETURN ZeroExtend32(MEM[addr + offset])
+ _MM_UPCONV_EPI32_SINT8:
+ RETURN SignExtend32(MEM[addr + offset])
+ _MM_UPCONV_EPI32_UINT16:
+ RETURN ZeroExtend32(MEM[addr + 2*offset])
+ _MM_UPCONV_EPI32_SINT16:
+ RETURN SignExtend32(MEM[addr + 2*offset])
+ ESAC
+}
+DEFINE UPCONVERTSIZE(convertTo) {
+ CASE conv OF
+ _MM_UPCONV_EPI32_NONE:
+ RETURN 4
+ _MM_UPCONV_EPI32_UINT8:
+ RETURN 1
+ _MM_UPCONV_EPI32_SINT8:
+ RETURN 1
+ _MM_UPCONV_EPI32_UINT16:
+ RETURN 2
+ _MM_UPCONV_EPI32_SINT16:
+ RETURN 2
+ ESAC
+}
+dst[511:0] := src[511:0]
+loadOffset := 0
+upSize := UPCONVERTSIZE(conv)
+addr := mt
+FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := UPCONVERT(addr, loadOffset, conv)
+ loadOffset := loadOffset + 1
+ IF (mt + loadOffset * upSize) % 64 == 0
+ BREAK
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VLOADUNPACKLD" form="zmm, m512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_mask_extloadunpacklo_epi32">
+ <type>Integer</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Load</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="src" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="void const *" varname="mt" etype="UI32"/>
+ <parameter type="_MM_UPCONV_EPI32_ENUM" varname="conv" etype="IMM" immtype="_MM_UPCONV_EPI32"/>
+ <parameter type="int" varname="hint" etype="UI32" hint="TRUE"/>
+ <description>Loads the low-64-byte-aligned portion of the byte/word/doubleword stream starting at element-aligned address mt, up-converted depending on the value of "conv", and expanded into packed 32-bit integers in "dst". The initial values of "dst" are copied from "src". Only those converted doublewords that occur before first 64-byte-aligned address following "mt" are loaded. Elements in the resulting vector that do not map to those doublewords are taken from "src". "hint" indicates to the processor whether the loaded data is non-temporal. Elements are copied to "dst" according to element selector "k" (elements are skipped when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE UPCONVERT(addr, offset, convertTo) {
+ CASE conv OF
+ _MM_UPCONV_EPI32_NONE:
+ RETURN MEM[addr + 4*offset]
+ _MM_UPCONV_EPI32_UINT8:
+ RETURN ZeroExtend32(MEM[addr + offset])
+ _MM_UPCONV_EPI32_SINT8:
+ RETURN SignExtend32(MEM[addr + offset])
+ _MM_UPCONV_EPI32_UINT16:
+ RETURN ZeroExtend32(MEM[addr + 2*offset])
+ _MM_UPCONV_EPI32_SINT16:
+ RETURN SignExtend32(MEM[addr + 2*offset])
+ ESAC
+}
+DEFINE UPCONVERTSIZE(convertTo) {
+ CASE conv OF
+ _MM_UPCONV_EPI32_NONE:
+ RETURN 4
+ _MM_UPCONV_EPI32_UINT8:
+ RETURN 1
+ _MM_UPCONV_EPI32_SINT8:
+ RETURN 1
+ _MM_UPCONV_EPI32_UINT16:
+ RETURN 2
+ _MM_UPCONV_EPI32_SINT16:
+ RETURN 2
+ ESAC
+}
+dst[511:0] := src[511:0]
+loadOffset := 0
+upSize := UPCONVERTSIZE(conv)
+addr := mt
+FOR j := 0 to 15
+ IF k[j]
+ i := j*32
+ dst[i+31:i] := UPCONVERT(addr, loadOffset, conv)
+ loadOffset := loadOffset + 1
+ IF (mt + loadOffset * upSize) % 64 == 0
+ BREAK
+ FI
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VLOADUNPACKLD" form="zmm {k}, m512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_extloadunpackhi_epi64">
+ <type>Integer</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Load</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="src" etype="UI64"/>
+ <parameter type="void const *" varname="mt" etype="UI64"/>
+ <parameter type="_MM_UPCONV_EPI64_ENUM" varname="conv" etype="IMM" immtype="_MM_UPCONV_EPI64"/>
+ <parameter type="int" varname="hint" etype="UI32" hint="TRUE"/>
+ <description>Loads the high-64-byte-aligned portion of the quadword stream starting at element-aligned address mt-64, up-converted depending on the value of "conv", and expanded into packed 64-bit integers in "dst". The initial values of "dst" are copied from "src". Only those converted quadwords that occur at or after the first 64-byte-aligned address following (mt-64) are loaded. Elements in the resulting vector that do not map to those quadwords are taken from "src". "hint" indicates to the processor whether the loaded data is non-temporal.</description>
+ <operation>DEFINE UPCONVERT(addr, offset, convertTo) {
+ CASE conv OF
+ _MM_UPCONV_EPI64_NONE:
+ RETURN MEM[addr + 8*offset]
+ ESAC
+}
+DEFINE UPCONVERTSIZE(convertTo) {
+ CASE conv OF
+ _MM_UPCONV_EPI64_NONE:
+ RETURN 8
+ ESAC
+}
+dst[511:0] := src[511:0]
+loadOffset := 0
+foundNext64BytesBoundary := false
+upSize := UPCONVERTSIZE(conv)
+addr := mt-64
+FOR j := 0 to 7
+ IF foundNext64BytesBoundary == false
+ IF (addr + (loadOffset + 1)*upSize % 64) == 0
+ foundNext64BytesBoundary := true
+ FI
+ ELSE
+ i := j*64
+ dst[i+63:i] := UPCONVERT(addr, loadOffset, conv)
+ FI
+ loadOffset := loadOffset + 1
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VLOADUNPACKHQ" form="zmm, m512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_mask_extloadunpackhi_epi64">
+ <type>Integer</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Load</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="void const *" varname="mt" etype="UI64"/>
+ <parameter type="_MM_UPCONV_EPI64_ENUM" varname="conv" etype="IMM" immtype="_MM_UPCONV_EPI64"/>
+ <parameter type="int" varname="hint" etype="UI32" hint="TRUE"/>
+ <description>Loads the high-64-byte-aligned portion of the quadword stream starting at element-aligned address mt-64, up-converted depending on the value of "conv", and expanded into packed 64-bit integers in "dst". The initial values of "dst" are copied from "src". Only those converted quadwords that occur at or after the first 64-byte-aligned address following (mt-64) are loaded. Elements in the resulting vector that do not map to those quadwords are taken from "src". "hint" indicates to the processor whether the loaded data is non-temporal. Elements are copied to "dst" according to element selector "k" (elements are skipped when the corresponding mask bit is not set).</description>
+ <operation>DEFINE UPCONVERT(addr, offset, convertTo) {
+ CASE conv OF
+ _MM_UPCONV_EPI64_NONE:
+ RETURN MEM[addr + 8*offset]
+ ESAC
+}
+DEFINE UPCONVERTSIZE(convertTo) {
+ CASE conv OF
+ _MM_UPCONV_EPI64_NONE:
+ RETURN 8
+ ESAC
+}
+dst[511:0] := src[511:0]
+loadOffset := 0
+foundNext64BytesBoundary := false
+upSize := UPCONVERTSIZE(conv)
+addr := mt-64
+FOR j := 0 to 7
+ IF k[j]
+ IF foundNext64BytesBoundary == false
+ IF (addr + (loadOffset + 1)*upSize % 64) == 0
+ foundNext64BytesBoundary := true
+ FI
+ ELSE
+ i := j*64
+ dst[i+63:i] := UPCONVERT(addr, loadOffset, conv)
+ FI
+ loadOffset := loadOffset + 1
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VLOADUNPACKHQ" form="zmm {k}, m512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_extloadunpacklo_epi64">
+ <type>Integer</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Load</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="src" etype="UI64"/>
+ <parameter type="void const *" varname="mt" etype="UI64"/>
+ <parameter type="_MM_UPCONV_EPI64_ENUM" varname="conv" etype="IMM" immtype="_MM_UPCONV_EPI64"/>
+ <parameter type="int" varname="hint" etype="UI32" hint="TRUE"/>
+ <description>Loads the low-64-byte-aligned portion of the quadword stream starting at element-aligned address mt, up-converted depending on the value of "conv", and expanded into packed 64-bit integers in "dst". The initial values of "dst" are copied from "src". Only those converted quad that occur before first 64-byte-aligned address following "mt" are loaded. Elements in the resulting vector that do not map to those quadwords are taken from "src". "hint" indicates to the processor whether the loaded data is non-temporal.</description>
+ <operation>
+DEFINE UPCONVERT(addr, offset, convertTo) {
+ CASE conv OF
+ _MM_UPCONV_EPI64_NONE:
+ RETURN MEM[addr + 8*offset]
+ ESAC
+}
+DEFINE UPCONVERTSIZE(convertTo) {
+ CASE conv OF
+ _MM_UPCONV_EPI64_NONE:
+ RETURN 8
+ ESAC
+}
+dst[511:0] := src[511:0]
+loadOffset := 0
+upSize := UPCONVERTSIZE(conv)
+addr := mt
+FOR j := 0 to 7
+ i := j*64
+ dst[i+63:i] := UPCONVERT(addr, loadOffset, conv)
+ loadOffset := loadOffset + 1
+ IF (addr + loadOffset*upSize % 64) == 0
+ BREAK
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VLOADUNPACKLQ" form="zmm, m512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_mask_extloadunpacklo_epi64">
+ <type>Integer</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Load</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="void const *" varname="mt" etype="UI64"/>
+ <parameter type="_MM_UPCONV_EPI64_ENUM" varname="conv" etype="IMM" immtype="_MM_UPCONV_EPI64"/>
+ <parameter type="int" varname="hint" etype="UI32" hint="TRUE"/>
+ <description>Loads the low-64-byte-aligned portion of the quadword stream starting at element-aligned address mt, up-converted depending on the value of "conv", and expanded into packed 64-bit integers in "dst". The initial values of "dst" are copied from "src". Only those converted quadwords that occur before first 64-byte-aligned address following "mt" are loaded. Elements in the resulting vector that do not map to those quadwords are taken from "src". "hint" indicates to the processor whether the loaded data is non-temporal. Elements are copied to "dst" according to element selector "k" (elements are skipped when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE UPCONVERT(addr, offset, convertTo) {
+ CASE conv OF
+ _MM_UPCONV_EPI64_NONE:
+ RETURN MEM[addr + 8*offset]
+ ESAC
+}
+DEFINE UPCONVERTSIZE(convertTo) {
+ CASE conv OF
+ _MM_UPCONV_EPI64_NONE:
+ RETURN 8
+ ESAC
+}
+dst[511:0] := src[511:0]
+loadOffset := 0
+upSize := UPCONVERTSIZE(conv)
+addr := mt
+FOR j := 0 to 7
+ IF k[j]
+ i := j*64
+ dst[i+63:i] := UPCONVERT(addr, loadOffset, conv)
+ loadOffset := loadOffset + 1
+ IF (addr + loadOffset*upSize % 64) == 0
+ BREAK
+ FI
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VLOADUNPACKLQ" form="zmm {k}, m512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_extloadunpackhi_ps">
+ <type>Floating Point</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Load</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="void const *" varname="mt" etype="FP32"/>
+ <parameter type="_MM_UPCONV_PS_ENUM" varname="conv" etype="IMM" immtype="_MM_UPCONV_PS"/>
+ <parameter type="int" varname="hint" etype="UI32" hint="TRUE"/>
+ <description>Loads the high-64-byte-aligned portion of the byte/word/doubleword stream starting at element-aligned address mt-64, up-converted depending on the value of "conv", and expanded into packed single-precision (32-bit) floating-point elements in "dst". The initial values of "dst" are copied from "src". Only those converted doublewords that occur at or after the first 64-byte-aligned address following (mt-64) are loaded. Elements in the resulting vector that do not map to those doublewords are taken from "src". "hint" indicates to the processor whether the loaded data is non-temporal.</description>
+ <operation>DEFINE UPCONVERT(addr, offset, convertTo) {
+ CASE conv OF
+ _MM_UPCONV_PS_NONE:
+ RETURN MEM[addr + 4*offset]
+ _MM_UPCONV_PS_FLOAT16:
+ RETURN Convert_FP16_To_FP32(MEM[addr + 2*offset])
+ _MM_UPCONV_PS_UINT8:
+ RETURN Convert_UInt8_To_FP32(MEM[addr + offset])
+ _MM_UPCONV_PS_SINT8:
+ RETURN Convert_Int8_To_FP32(MEM[addr + offset])
+ _MM_UPCONV_PS_UINT16:
+ RETURN Convert_UInt16_To_FP32(MEM[addr + 2*offset])
+ _MM_UPCONV_PS_SINT16:
+ RETURN Convert_Int16_To_FP32(MEM[addr + 2*offset])
+ ESAC
+}
+DEFINE UPCONVERTSIZE(convertTo) {
+ CASE conv OF
+ _MM_UPCONV_PS_NONE:
+ RETURN 4
+ _MM_UPCONV_PS_FLOAT16:
+ RETURN 2
+ _MM_UPCONV_PS_UINT8:
+ RETURN 1
+ _MM_UPCONV_PS_SINT8:
+ RETURN 1
+ _MM_UPCONV_PS_UINT16:
+ RETURN 2
+ _MM_UPCONV_PS_SINT16:
+ RETURN 2
+ ESAC
+}
+dst[511:0] := src[511:0]
+loadOffset := 0
+foundNext64BytesBoundary := false
+upSize := UPCONVERTSIZE(conv)
+addr := mt-64
+FOR j := 0 to 15
+ IF foundNext64BytesBoundary == false
+ IF (addr + (loadOffset + 1)*upSize % 64) == 0
+ foundNext64BytesBoundary := true
+ FI
+ ELSE
+ i := j*32
+ dst[i+31:i] := UPCONVERT(addr, loadOffset, conv)
+ FI
+ loadOffset := loadOffset + 1
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VLOADUNPACKHPS" form="zmm, m512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_mask_extloadunpackhi_ps">
+ <type>Floating Point</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Load</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="void const *" varname="mt" etype="FP32"/>
+ <parameter type="_MM_UPCONV_PS_ENUM" varname="conv" etype="IMM" immtype="_MM_UPCONV_PS"/>
+ <parameter type="int" varname="hint" etype="UI32" hint="TRUE"/>
+ <description>Loads the high-64-byte-aligned portion of the byte/word/doubleword stream starting at element-aligned address mt-64, up-converted depending on the value of "conv", and expanded into packed single-precision (32-bit) floating-point elements in "dst". The initial values of "dst" are copied from "src". Only those converted doublewords that occur at or after the first 64-byte-aligned address following (mt-64) are loaded. Elements in the resulting vector that do not map to those doublewords are taken from "src". "hint" indicates to the processor whether the loaded data is non-temporal. Elements are copied to "dst" according to element selector "k" (elements are skipped when the corresponding mask bit is not set).</description>
+ <operation>DEFINE UPCONVERT(addr, offset, convertTo) {
+ CASE conv OF
+ _MM_UPCONV_PS_NONE:
+ RETURN MEM[addr + 4*offset]
+ _MM_UPCONV_PS_FLOAT16:
+ RETURN Convert_FP16_To_FP32(MEM[addr + 2*offset])
+ _MM_UPCONV_PS_UINT8:
+ RETURN Convert_UInt8_To_FP32(MEM[addr + offset])
+ _MM_UPCONV_PS_SINT8:
+ RETURN Convert_Int8_To_FP32(MEM[addr + offset])
+ _MM_UPCONV_PS_UINT16:
+ RETURN Convert_UInt16_To_FP32(MEM[addr + 2*offset])
+ _MM_UPCONV_PS_SINT16:
+ RETURN Convert_Int16_To_FP32(MEM[addr + 2*offset])
+ ESAC
+}
+DEFINE UPCONVERTSIZE(convertTo) {
+ CASE conv OF
+ _MM_UPCONV_PS_NONE:
+ RETURN 4
+ _MM_UPCONV_PS_FLOAT16:
+ RETURN 2
+ _MM_UPCONV_PS_UINT8:
+ RETURN 1
+ _MM_UPCONV_PS_SINT8:
+ RETURN 1
+ _MM_UPCONV_PS_UINT16:
+ RETURN 2
+ _MM_UPCONV_PS_SINT16:
+ RETURN 2
+ ESAC
+}
+dst[511:0] := src[511:0]
+loadOffset := 0
+foundNext64BytesBoundary := false
+upSize := UPCONVERTSIZE(conv)
+addr := mt-64
+FOR j := 0 to 15
+ IF k[j]
+ IF foundNext64BytesBoundary == false
+ IF (addr + (loadOffset + 1)*upSize % 64) == 0
+ foundNext64BytesBoundary := true
+ FI
+ ELSE
+ i := j*32
+ dst[i+31:i] := UPCONVERT(addr, loadOffset, conv)
+ FI
+ loadOffset := loadOffset + 1
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VLOADUNPACKHPS" form="zmm {k}, m512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_extloadunpacklo_ps">
+ <type>Floating Point</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Load</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="void const *" varname="mt" etype="FP32"/>
+ <parameter type="_MM_UPCONV_PS_ENUM" varname="conv" etype="IMM" immtype="_MM_UPCONV_PS"/>
+ <parameter type="int" varname="hint" etype="UI32" hint="TRUE"/>
+ <description>Loads the low-64-byte-aligned portion of the byte/word/doubleword stream starting at element-aligned address mt, up-converted depending on the value of "conv", and expanded into packed single-precision (32-bit) floating-point elements in "dst". The initial values of "dst" are copied from "src". Only those converted doublewords that occur before first 64-byte-aligned address following "mt" are loaded. Elements in the resulting vector that do not map to those doublewords are taken from "src". "hint" indicates to the processor whether the loaded data is non-temporal.</description>
+ <operation>DEFINE UPCONVERT(addr, offset, convertTo) {
+ CASE conv OF
+ _MM_UPCONV_PS_NONE:
+ RETURN MEM[addr + 4*offset]
+ _MM_UPCONV_PS_FLOAT16:
+ RETURN Convert_FP16_To_FP32(MEM[addr + 4*offset])
+ _MM_UPCONV_PS_UINT8:
+ RETURN Convert_UInt8_To_FP32(MEM[addr + offset])
+ _MM_UPCONV_PS_SINT8:
+ RETURN Convert_Int8_To_FP32(MEM[addr + offset])
+ _MM_UPCONV_PS_UINT16:
+ RETURN Convert_UInt16_To_FP32(MEM[addr + 2*offset])
+ _MM_UPCONV_PS_SINT16:
+ RETURN Convert_Int16_To_FP32(MEM[addr + 2*offset])
+ ESAC
+}
+DEFINE UPCONVERTSIZE(convertTo) {
+ CASE conv OF
+ _MM_UPCONV_PS_NONE:
+ RETURN 4
+ _MM_UPCONV_PS_FLOAT16:
+ RETURN 2
+ _MM_UPCONV_PS_UINT8:
+ RETURN 1
+ _MM_UPCONV_PS_SINT8:
+ RETURN 1
+ _MM_UPCONV_PS_UINT16:
+ RETURN 2
+ _MM_UPCONV_PS_SINT16:
+ RETURN 2
+ ESAC
+}
+dst[511:0] := src[511:0]
+loadOffset := 0
+upSize := UPCONVERTSIZE(conv)
+addr := mt
+FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := UPCONVERT(addr, loadOffset, conv)
+ loadOffset := loadOffset + 1
+ IF (mt + loadOffset * upSize) % 64 == 0
+ BREAK
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VLOADUNPACKLPS" form="zmm, m512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_mask_extloadunpacklo_ps">
+ <type>Floating Point</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Load</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="void const *" varname="mt" etype="FP32"/>
+ <parameter type="_MM_UPCONV_PS_ENUM" varname="conv" etype="IMM" immtype="_MM_UPCONV_PS"/>
+ <parameter type="int" varname="hint" etype="UI32" hint="TRUE"/>
+ <description>Loads the low-64-byte-aligned portion of the byte/word/doubleword stream starting at element-aligned address mt, up-converted depending on the value of "conv", and expanded into packed single-precision (32-bit) floating-point elements in "dst". The initial values of "dst" are copied from "src". Only those converted doublewords that occur before first 64-byte-aligned address following "mt" are loaded. Elements in the resulting vector that do not map to those doublewords are taken from "src". "hint" indicates to the processor whether the loaded data is non-temporal. Elements are copied to "dst" according to element selector "k" (elements are skipped when the corresponding mask bit is not set).</description>
+ <operation>DEFINE UPCONVERT(addr, offset, convertTo) {
+ CASE conv OF
+ _MM_UPCONV_PS_NONE:
+ RETURN MEM[addr + 4*offset]
+ _MM_UPCONV_PS_FLOAT16:
+ RETURN Convert_FP16_To_FP32(MEM[addr + 4*offset])
+ _MM_UPCONV_PS_UINT8:
+ RETURN Convert_UInt8_To_FP32(MEM[addr + offset])
+ _MM_UPCONV_PS_SINT8:
+ RETURN Convert_Int8_To_FP32(MEM[addr + offset])
+ _MM_UPCONV_PS_UINT16:
+ RETURN Convert_UInt16_To_FP32(MEM[addr + 2*offset])
+ _MM_UPCONV_PS_SINT16:
+ RETURN Convert_Int16_To_FP32(MEM[addr + 2*offset])
+ ESAC
+}
+DEFINE UPCONVERTSIZE(convertTo) {
+ CASE conv OF
+ _MM_UPCONV_PS_NONE:
+ RETURN 4
+ _MM_UPCONV_PS_FLOAT16:
+ RETURN 2
+ _MM_UPCONV_PS_UINT8:
+ RETURN 1
+ _MM_UPCONV_PS_SINT8:
+ RETURN 1
+ _MM_UPCONV_PS_UINT16:
+ RETURN 2
+ _MM_UPCONV_PS_SINT16:
+ RETURN 2
+ ESAC
+}
+dst[511:0] := src[511:0]
+loadOffset := 0
+upSize := UPCONVERTSIZE(conv)
+addr := mt
+FOR j := 0 to 15
+ IF k[j]
+ i := j*32
+ dst[i+31:i] := UPCONVERT(addr, loadOffset, conv)
+ loadOffset := loadOffset + 1
+ IF (mt + loadOffset * upSize) % 64 == 0
+ BREAK
+ FI
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VLOADUNPACKLPS" form="zmm {k}, m512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_extloadunpackhi_pd">
+ <type>Floating Point</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Load</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="void const *" varname="mt" etype="FP64"/>
+ <parameter type="_MM_UPCONV_PD_ENUM" varname="conv" etype="IMM" immtype="_MM_UPCONV_PD"/>
+ <parameter type="int" varname="hint" etype="UI32" hint="TRUE"/>
+ <description>Loads the high-64-byte-aligned portion of the quadword stream starting at element-aligned address mt-64, up-converted depending on the value of "conv", and expanded into packed double-precision (64-bit) floating-point values in "dst". The initial values of "dst" are copied from "src". Only those converted quadwords that occur at or after the first 64-byte-aligned address following (mt-64) are loaded. Elements in the resulting vector that do not map to those quadwords are taken from "src". "hint" indicates to the processor whether the loaded data is non-temporal.</description>
+ <operation>DEFINE UPCONVERT(addr, offset, convertTo) {
+ CASE conv OF
+ _MM_UPCONV_PD_NONE:
+ RETURN MEM[addr + 8*offset]
+ ESAC
+}
+DEFINE UPCONVERTSIZE(convertTo) {
+ CASE conv OF
+ _MM_UPCONV_PD_NONE:
+ RETURN 8
+ ESAC
+}
+dst[511:0] := src[511:0]
+loadOffset := 0
+foundNext64BytesBoundary := false
+upSize := UPCONVERTSIZE(conv)
+addr := mt-64
+FOR j := 0 to 7
+ IF foundNext64BytesBoundary == false
+ IF (addr + (loadOffset + 1)*upSize) % 64 == 0
+ foundNext64BytesBoundary := true
+ FI
+ ELSE
+ i := j*64
+ dst[i+63:i] := UPCONVERT(addr, loadOffset, conv)
+ FI
+ loadOffset := loadOffset + 1
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VLOADUNPACKHPD" form="zmm, m512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_mask_extloadunpackhi_pd">
+ <type>Floating Point</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Load</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="void const *" varname="mt" etype="FP64"/>
+ <parameter type="_MM_UPCONV_PD_ENUM" varname="conv" etype="IMM" immtype="_MM_UPCONV_PD"/>
+ <parameter type="int" varname="hint" etype="UI32" hint="TRUE"/>
+ <description>Loads the high-64-byte-aligned portion of the quadword stream starting at element-aligned address mt-64, up-converted depending on the value of "conv", and expanded into packed double-precision (64-bit) floating-point values in "dst". The initial values of "dst" are copied from "src". Only those converted quadwords that occur at or after the first 64-byte-aligned address following (mt-64) are loaded. Elements in the resulting vector that do not map to those quadwords are taken from "src". "hint" indicates to the processor whether the loaded data is non-temporal. Elements are copied to "dst" according to element selector "k" (elements are skipped when the corresponding mask bit is not set).</description>
+ <operation>DEFINE UPCONVERT(addr, offset, convertTo) {
+ CASE conv OF
+ _MM_UPCONV_PD_NONE:
+ RETURN MEM[addr + 8*offset]
+ ESAC
+}
+DEFINE UPCONVERTSIZE(convertTo) {
+ CASE conv OF
+ _MM_UPCONV_PD_NONE:
+ RETURN 8
+ ESAC
+}
+dst[511:0] := src[511:0]
+loadOffset := 0
+foundNext64BytesBoundary := false
+upSize := UPCONVERTSIZE(conv)
+addr := mt-64
+FOR j := 0 to 7
+ IF k[j]
+ IF foundNext64BytesBoundary == false
+ IF (addr + (loadOffset + 1)*upSize) % 64 == 0
+ foundNext64BytesBoundary := true
+ FI
+ ELSE
+ i := j*64
+ dst[i+63:i] := UPCONVERT(addr, loadOffset, conv)
+ FI
+ loadOffset := loadOffset + 1
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VLOADUNPACKHPD" form="zmm {k}, m512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_extloadunpacklo_pd">
+ <type>Floating Point</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Load</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="void const *" varname="mt" etype="FP64"/>
+ <parameter type="_MM_UPCONV_PD_ENUM" varname="conv" etype="IMM" immtype="_MM_UPCONV_PD"/>
+ <parameter type="int" varname="hint" etype="UI32" hint="TRUE"/>
+ <description>Loads the low-64-byte-aligned portion of the quadword stream starting at element-aligned address mt, up-converted depending on the value of "conv", and expanded into packed double-precision (64-bit) floating-point elements in "dst". The initial values of "dst" are copied from "src". Only those converted quadwords that occur before first 64-byte-aligned address following "mt" are loaded. Elements in the resulting vector that do not map to those quadwords are taken from "src". "hint" indicates to the processor whether the loaded data is non-temporal.</description>
+ <operation>
+DEFINE UPCONVERT(addr, offset, convertTo) {
+ CASE conv OF
+ _MM_UPCONV_PD_NONE:
+ RETURN MEM[addr + 8*offset]
+ ESAC
+}
+DEFINE UPCONVERTSIZE(convertTo) {
+ CASE conv OF
+ _MM_UPCONV_PD_NONE:
+ RETURN 8
+ ESAC
+}
+dst[511:0] := src[511:0]
+loadOffset := 0
+upSize := UPCONVERTSIZE(conv)
+addr := mt
+FOR j := 0 to 7
+ i := j*64
+ dst[i+63:i] := UPCONVERT(addr, loadOffset, conv)
+ loadOffset := loadOffset + 1
+ IF (mt + loadOffset * upSize) % 64 == 0
+ BREAK
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VLOADUNPACKLPD" form="zmm, m512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_mask_extloadunpacklo_pd">
+ <type>Floating Point</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Load</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="void const *" varname="mt" etype="FP64"/>
+ <parameter type="_MM_UPCONV_PD_ENUM" varname="conv" etype="IMM" immtype="_MM_UPCONV_PD"/>
+ <parameter type="int" varname="hint" etype="UI32" hint="TRUE"/>
+ <description>Loads the low-64-byte-aligned portion of the quadword stream starting at element-aligned address mt, up-converted depending on the value of "conv", and expanded into packed double-precision (64-bit) floating-point elements in "dst". The initial values of "dst" are copied from "src". Only those converted quadwords that occur before first 64-byte-aligned address following "mt" are loaded. Elements in the resulting vector that do not map to those quadwords are taken from "src". "hint" indicates to the processor whether the loaded data is non-temporal. Elements are copied to "dst" according to element selector "k" (elements are skipped when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE UPCONVERT(addr, offset, convertTo) {
+ CASE conv OF
+ _MM_UPCONV_PD_NONE:
+ RETURN MEM[addr + 8*offset]
+ ESAC
+}
+DEFINE UPCONVERTSIZE(convertTo) {
+ CASE conv OF
+ _MM_UPCONV_PD_NONE:
+ RETURN 8
+ ESAC
+}
+dst[511:0] := src[511:0]
+loadOffset := 0
+upSize := UPCONVERTSIZE(conv)
+addr := mt
+FOR j := 0 to 7
+ IF k[j]
+ i := j*64
+ dst[i+63:i] := UPCONVERT(addr, loadOffset, conv)
+ loadOffset := loadOffset + 1
+ IF (mt + loadOffset * upSize) % 64 == 0
+ BREAK
+ FI
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VLOADUNPACKLPD" form="zmm {k}, m512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_extpackstorehi_epi32">
+ <type>Integer</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void *" varname="mt" etype="UI32"/>
+ <parameter type="__m512i" varname="v1" etype="UI32"/>
+ <parameter type="_MM_DOWNCONV_EPI32_ENUM" varname="conv" etype="IMM" immtype="_MM_DOWNCONV_EPI32"/>
+ <parameter type="int" varname="hint" etype="UI32" hint="TRUE"/>
+ <description>Down-converts and stores packed 32-bit integer elements of "v1" into a byte/word/doubleword stream according to "conv" at a logically mapped starting address (mt-64), storing the high-64-byte elements of that stream (those elements of the stream that map at or after the first 64-byte-aligned address following (mt-64)). "hint" indicates to the processor whether the data is non-temporal.</description>
+ <operation>DEFINE DOWNCONVERT(element, convertTo) {
+ CASE convertTo OF
+ _MM_DOWNCONV_EPI32_NONE:
+ RETURN element[31:0]
+ _MM_DOWNCONV_EPI32_UINT8:
+ RETURN Truncate8(element[31:0])
+ _MM_DOWNCONV_EPI32_SINT8:
+ RETURN Saturate8(element[31:0])
+ _MM_DOWNCONV_EPI32_UINT16:
+ RETURN Truncate16(element[31:0])
+ _MM_DOWNCONV_EPI32_SINT16:
+ RETURN Saturate16(element[31:0])
+ ESAC
+}
+DEFINE DOWNCONVERTSIZE(convertTo) {
+ CASE convertTo OF
+ _MM_DOWNCONV_EPI32_NONE:
+ RETURN 4
+ _MM_DOWNCONV_EPI32_UINT8:
+ RETURN 1
+ _MM_DOWNCONV_EPI32_SINT8:
+ RETURN 1
+ _MM_DOWNCONV_EPI32_UINT16:
+ RETURN 2
+ _MM_DOWNCONV_EPI32_SINT16:
+ RETURN 2
+ ESAC
+}
+storeOffset := 0
+foundNext64BytesBoundary := false
+downSize := DOWNCONVERTSIZE(conv)
+addr := mt-64
+FOR j := 0 to 15
+ IF foundNext64BytesBoundary == false
+ IF ((addr + (storeOffset + 1)*downSize) % 64) == 0
+ foundNext64BytesBoundary := true
+ FI
+ ELSE
+ i := j*32
+ tmp := DOWNCONVERT(v1[i+31:i], conv)
+ storeAddr := addr + storeOffset * downSize
+ CASE downSize OF
+ 4: MEM[storeAddr] := tmp[31:0]
+ 2: MEM[storeAddr] := tmp[15:0]
+ 1: MEM[storeAddr] := tmp[7:0]
+ ESAC
+ FI
+ storeOffset := storeOffset + 1
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPACKSTOREHD" form="m512, zmm"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_mask_extpackstorehi_epi32">
+ <type>Integer</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void *" varname="mt" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="v1" etype="UI32"/>
+ <parameter type="_MM_DOWNCONV_EPI32_ENUM" varname="conv" etype="IMM" immtype="_MM_DOWNCONV_EPI32"/>
+ <parameter type="int" varname="hint" etype="UI32" hint="TRUE"/>
+ <description>Down-converts and stores packed 32-bit integer elements of "v1" into a byte/word/doubleword stream according to "conv" at a logically mapped starting address (mt-64), storing the high-64-byte elements of that stream (those elements of the stream that map at or after the first 64-byte-aligned address following (mt-64)). "hint" indicates to the processor whether the data is non-temporal. Elements are stored to memory according to element selector "k" (elements are skipped when the corresponding mask bit is not set).</description>
+ <operation>DEFINE DOWNCONVERT(element, convertTo) {
+ CASE convertTo OF
+ _MM_DOWNCONV_EPI32_NONE:
+ RETURN element[31:0]
+ _MM_DOWNCONV_EPI32_UINT8:
+ RETURN Truncate8(element[31:0])
+ _MM_DOWNCONV_EPI32_SINT8:
+ RETURN Saturate8(element[31:0])
+ _MM_DOWNCONV_EPI32_UINT16:
+ RETURN Truncate16(element[31:0])
+ _MM_DOWNCONV_EPI32_SINT16:
+ RETURN Saturate16(element[31:0])
+ ESAC
+}
+DEFINE DOWNCONVERTSIZE(convertTo) {
+ CASE convertTo OF
+ _MM_DOWNCONV_EPI32_NONE:
+ RETURN 4
+ _MM_DOWNCONV_EPI32_UINT8:
+ RETURN 1
+ _MM_DOWNCONV_EPI32_SINT8:
+ RETURN 1
+ _MM_DOWNCONV_EPI32_UINT16:
+ RETURN 2
+ _MM_DOWNCONV_EPI32_SINT16:
+ RETURN 2
+ ESAC
+}
+storeOffset := 0
+foundNext64BytesBoundary := false
+downSize := DOWNCONVERTSIZE(conv)
+addr := mt-64
+FOR j := 0 to 15
+ IF k[j]
+ IF foundNext64BytesBoundary == false
+ IF ((addr + (storeOffset + 1)*downSize) % 64) == 0
+ foundNext64BytesBoundary := true
+ FI
+ ELSE
+ i := j*32
+ tmp := DOWNCONVERT(v1[i+31:i], conv)
+ storeAddr := addr + storeOffset * downSize
+ CASE downSize OF
+ 4: MEM[storeAddr] := tmp[31:0]
+ 2: MEM[storeAddr] := tmp[15:0]
+ 1: MEM[storeAddr] := tmp[7:0]
+ ESAC
+ FI
+ storeOffset := storeOffset + 1
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPACKSTOREHD" form="m512 {k}, zmm"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_extpackstorelo_epi32">
+ <type>Integer</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void *" varname="mt" etype="UI32"/>
+ <parameter type="__m512i" varname="v1" etype="UI32"/>
+ <parameter type="_MM_DOWNCONV_EPI32_ENUM" varname="conv" etype="IMM" immtype="_MM_DOWNCONV_EPI32"/>
+ <parameter type="int" varname="hint" etype="UI32" hint="TRUE"/>
+ <description>Down-converts and stores packed 32-bit integer elements of "v1" into a byte/word/doubleword stream according to "conv" at a logically mapped starting address "mt", storing the low-64-byte elements of that stream (those elements of the stream that map before the first 64-byte-aligned address following "mt"). "hint" indicates to the processor whether the data is non-temporal.</description>
+ <operation>
+DEFINE DOWNCONVERT(element, convertTo) {
+ CASE convertTo OF
+ _MM_DOWNCONV_EPI32_NONE:
+ RETURN element[31:0]
+ _MM_DOWNCONV_EPI32_UINT8:
+ RETURN Truncate8(element[31:0])
+ _MM_DOWNCONV_EPI32_SINT8:
+ RETURN Saturate8(element[31:0])
+ _MM_DOWNCONV_EPI32_UINT16:
+ RETURN Truncate16(element[31:0])
+ _MM_DOWNCONV_EPI32_SINT16:
+ RETURN Saturate16(element[31:0])
+ ESAC
+}
+DEFINE DOWNCONVERTSIZE(convertTo) {
+ CASE convertTo OF
+ _MM_DOWNCONV_EPI32_NONE:
+ RETURN 4
+ _MM_DOWNCONV_EPI32_UINT8:
+ RETURN 1
+ _MM_DOWNCONV_EPI32_SINT8:
+ RETURN 1
+ _MM_DOWNCONV_EPI32_UINT16:
+ RETURN 2
+ _MM_DOWNCONV_EPI32_SINT16:
+ RETURN 2
+ ESAC
+}
+storeOffset := 0
+downSize := DOWNCONVERTSIZE(conv)
+addr := mt
+FOR j := 0 to 15
+ i := j*32
+ tmp := DOWNCONVERT(v1[i+31:i], conv)
+ storeAddr := addr + storeOffset * downSize
+ CASE downSize OF
+ 4: MEM[storeAddr] := tmp[31:0]
+ 2: MEM[storeAddr] := tmp[15:0]
+ 1: MEM[storeAddr] := tmp[7:0]
+ ESAC
+ storeOffset := storeOffset + 1
+ IF ((addr + storeOffset * downSize) % 64) == 0
+ BREAK
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPACKSTORELD" form="m512, zmm"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_mask_extpackstorelo_epi32">
+ <type>Integer</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void *" varname="mt" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="v1" etype="UI32"/>
+ <parameter type="_MM_DOWNCONV_EPI32_ENUM" varname="conv" etype="IMM" immtype="_MM_DOWNCONV_EPI32"/>
+ <parameter type="int" varname="hint" etype="UI32" hint="TRUE"/>
+ <description>Down-converts and stores packed 32-bit integer elements of "v1" into a byte/word/doubleword stream according to "conv" at a logically mapped starting address "mt", storing the low-64-byte elements of that stream (those elements of the stream that map before the first 64-byte-aligned address following "mt"). "hint" indicates to the processor whether the data is non-temporal. Elements are written to memory according to element selector "k" (elements are skipped when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE DOWNCONVERT(element, convertTo) {
+ CASE convertTo OF
+ _MM_DOWNCONV_EPI32_NONE:
+ RETURN element[31:0]
+ _MM_DOWNCONV_EPI32_UINT8:
+ RETURN Truncate8(element[31:0])
+ _MM_DOWNCONV_EPI32_SINT8:
+ RETURN Saturate8(element[31:0])
+ _MM_DOWNCONV_EPI32_UINT16:
+ RETURN Truncate16(element[31:0])
+ _MM_DOWNCONV_EPI32_SINT16:
+ RETURN Saturate16(element[31:0])
+ ESAC
+}
+DEFINE DOWNCONVERTSIZE(convertTo) {
+ CASE convertTo OF
+ _MM_DOWNCONV_EPI32_NONE:
+ RETURN 4
+ _MM_DOWNCONV_EPI32_UINT8:
+ RETURN 1
+ _MM_DOWNCONV_EPI32_SINT8:
+ RETURN 1
+ _MM_DOWNCONV_EPI32_UINT16:
+ RETURN 2
+ _MM_DOWNCONV_EPI32_SINT16:
+ RETURN 2
+ ESAC
+}
+storeOffset := 0
+downSize := DOWNCONVERTSIZE(conv)
+addr := mt
+FOR j := 0 to 15
+ IF k[j]
+ i := j*32
+ tmp := DOWNCONVERT(v1[i+31:i], conv)
+ storeAddr := addr + storeOffset * downSize
+ CASE downSize OF
+ 4: MEM[storeAddr] := tmp[31:0]
+ 2: MEM[storeAddr] := tmp[15:0]
+ 1: MEM[storeAddr] := tmp[7:0]
+ ESAC
+ storeOffset := storeOffset + 1
+ IF ((addr + storeOffset * downSize) % 64) == 0
+ BREAK
+ FI
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPACKSTORELD" form="m512 {k}, zmm"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_extpackstorehi_epi64">
+ <type>Integer</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void *" varname="mt" etype="UI64"/>
+ <parameter type="__m512i" varname="v1" etype="UI64"/>
+ <parameter type="_MM_DOWNCONV_EPI64_ENUM" varname="conv" etype="IMM" immtype="_MM_DOWNCONV_EPI64"/>
+ <parameter type="int" varname="hint" etype="UI32" hint="TRUE"/>
+ <description>Down-converts and stores packed 64-bit integer elements of "v1" into a quadword stream according to "conv" at a logically mapped starting address (mt-64), storing the high-64-byte elements of that stream (those elements of the stream that map at or after the first 64-byte-aligned address following (mt-64)). "hint" indicates to the processor whether the data is non-temporal.</description>
+ <operation>DEFINE DOWNCONVERT(element, convertTo) {
+ CASE convertTo OF
+ _MM_DOWNCONV_EPI64_NONE:
+ RETURN element[63:0]
+ ESAC
+}
+DEFINE DOWNCONVERTSIZE(convertTo) {
+ CASE convertTo OF
+ _MM_DOWNCONV_EPI64_NONE:
+ RETURN 8
+ ESAC
+}
+storeOffset := 0
+foundNext64BytesBoundary := false
+downSize := DOWNCONVERTSIZE(conv)
+addr := mt-64
+FOR j := 0 to 7
+ IF foundNext64BytesBoundary == false
+ IF ((addr + (storeOffset + 1)*downSize) % 64) == 0
+ foundNext64BytesBoundary := true
+ FI
+ ELSE
+ i := j*64
+ tmp := DOWNCONVERT(v1[i+63:i], conv)
+ storeAddr := addr + storeOffset * downSize
+ CASE downSize OF
+ 8: MEM[storeAddr] := tmp[63:0]
+ ESAC
+ FI
+ storeOffset := storeOffset + 1
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPACKSTOREHQ" form="m512, zmm"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_mask_extpackstorehi_epi64">
+ <type>Integer</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void *" varname="mt" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="v1" etype="UI64"/>
+ <parameter type="_MM_DOWNCONV_EPI64_ENUM" varname="conv" etype="IMM" immtype="_MM_DOWNCONV_EPI64"/>
+ <parameter type="int" varname="hint" etype="UI32" hint="TRUE"/>
+ <description>Down-converts and stores packed 64-bit integer elements of "v1" into a quadword stream according to "conv" at a logically mapped starting address (mt-64), storing the high-64-byte elements of that stream (those elements of the stream that map at or after the first 64-byte-aligned address following (mt-64)). "hint" indicates to the processor whether the data is non-temporal. Elements are stored to memory according to element selector "k" (elements are skipped when the corresponding mask bit is not set).</description>
+ <operation>DEFINE DOWNCONVERT(element, convertTo) {
+ CASE convertTo OF
+ _MM_DOWNCONV_EPI64_NONE:
+ RETURN element[63:0]
+ ESAC
+}
+DEFINE DOWNCONVERTSIZE(convertTo) {
+ CASE convertTo OF
+ _MM_DOWNCONV_EPI64_NONE:
+ RETURN 8
+ ESAC
+}
+storeOffset := 0
+foundNext64BytesBoundary := false
+downSize := DOWNCONVERTSIZE(conv)
+addr := mt-64
+FOR j := 0 to 7
+ IF k[j]
+ IF foundNext64BytesBoundary == false
+ IF ((addr + (storeOffset + 1)*downSize) % 64) == 0
+ foundNext64BytesBoundary := true
+ FI
+ ELSE
+ i := j*64
+ tmp := DOWNCONVERT(v1[i+63:i], conv)
+ storeAddr := addr + storeOffset * downSize
+ CASE downSize OF
+ 8: MEM[storeAddr] := tmp[63:0]
+ ESAC
+ FI
+ storeOffset := storeOffset + 1
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPACKSTOREHQ" form="m512 {k}, zmm"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_extpackstorelo_epi64">
+ <type>Integer</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void *" varname="mt" etype="UI64"/>
+ <parameter type="__m512i" varname="v1" etype="UI64"/>
+ <parameter type="_MM_DOWNCONV_EPI64_ENUM" varname="conv" etype="IMM" immtype="_MM_DOWNCONV_EPI64"/>
+ <parameter type="int" varname="hint" etype="UI32" hint="TRUE"/>
+ <description>Down-converts and stores packed 64-bit integer elements of "v1" into a quadword stream according to "conv" at a logically mapped starting address "mt", storing the low-64-byte elements of that stream (those elements of the stream that map before the first 64-byte-aligned address following "mt"). "hint" indicates to the processor whether the data is non-temporal.</description>
+ <operation>
+DEFINE DOWNCONVERT(element, convertTo) {
+ CASE convertTo OF
+ _MM_DOWNCONV_EPI64_NONE:
+ RETURN element[63:0]
+ ESAC
+}
+DEFINE DOWNCONVERTSIZE(convertTo) {
+ CASE convertTo OF
+ _MM_DOWNCONV_EPI64_NONE:
+ RETURN 8
+ ESAC
+}
+storeOffset := 0
+downSize := DOWNCONVERTSIZE(conv)
+addr := mt
+FOR j := 0 to 7
+ i := j*64
+ tmp := DOWNCONVERT(v1[i+63:i], conv)
+ storeAddr := addr + storeOffset * downSize
+ CASE downSize OF
+ 8: MEM[storeAddr] := tmp[63:0]
+ ESAC
+ storeOffset := storeOffset + 1
+ IF ((addr + storeOffset * downSize) % 64) == 0
+ BREAK
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPACKSTORELQ" form="m512, zmm"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_mask_extpackstorelo_epi64">
+ <type>Integer</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void *" varname="mt" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="v1" etype="UI64"/>
+ <parameter type="_MM_DOWNCONV_EPI64_ENUM" varname="conv" etype="IMM" immtype="_MM_DOWNCONV_EPI64"/>
+ <parameter type="int" varname="hint" etype="UI32" hint="TRUE"/>
+ <description>Down-converts and stores packed 64-bit integer elements of "v1" into a quadword stream according to "conv" at a logically mapped starting address "mt", storing the low-64-byte elements of that stream (those elements of the stream that map before the first 64-byte-aligned address following "mt"). "hint" indicates to the processor whether the data is non-temporal. Elements are stored to memory according to element selector "k" (elements are skipped when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE DOWNCONVERT(element, convertTo) {
+ CASE convertTo OF
+ _MM_DOWNCONV_EPI64_NONE:
+ RETURN element[63:0]
+ ESAC
+}
+DEFINE DOWNCONVERTSIZE(convertTo) {
+ CASE convertTo OF
+ _MM_DOWNCONV_EPI64_NONE:
+ RETURN 8
+ ESAC
+}
+storeOffset := 0
+downSize := DOWNCONVERTSIZE(conv)
+addr := mt
+FOR j := 0 to 7
+ IF k[j]
+ i := j*64
+ tmp := DOWNCONVERT(v1[i+63:i], conv)
+ storeAddr := addr + storeOffset * downSize
+ CASE downSize OF
+ 8: MEM[storeAddr] := tmp[63:0]
+ ESAC
+ storeOffset := storeOffset + 1
+ IF ((addr + storeOffset * downSize) % 64) == 0
+ BREAK
+ FI
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPACKSTORELQ" form="m512 {k}, zmm"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_extpackstorehi_ps">
+ <type>Floating Point</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void *" varname="mt" etype="FP32"/>
+ <parameter type="__m512" varname="v1" etype="FP32"/>
+ <parameter type="_MM_DOWNCONV_PS_ENUM" varname="conv" etype="IMM" immtype="_MM_DOWNCONV_PS"/>
+ <parameter type="int" varname="hint" etype="UI32" hint="TRUE"/>
+ <description>Down-converts and stores packed single-precision (32-bit) floating-point elements of "v1" into a byte/word/doubleword stream according to "conv" at a logically mapped starting address (mt-64), storing the high-64-byte elements of that stream (those elements of the stream that map at or after the first 64-byte-aligned address following (mt-64)). "hint" indicates to the processor whether the data is non-temporal.</description>
+ <operation>DEFINE DOWNCONVERT(element, convertTo) {
+ CASE convertTo OF
+ _MM_DOWNCONV_PS_NONE:
+ RETURN element[31:0]
+ _MM_DOWNCONV_PS_FLOAT16:
+ RETURN Convert_FP32_To_FP16(element[31:0])
+ _MM_DOWNCONV_PS_UINT8:
+ RETURN Truncate8(element[31:0])
+ _MM_DOWNCONV_PS_SINT8:
+ RETURN Saturate8(element[31:0])
+ _MM_DOWNCONV_PS_UINT16:
+ RETURN Truncate16(element[31:0])
+ _MM_DOWNCONV_PS_SINT16:
+ RETURN Saturate16(element[31:0])
+ ESAC
+}
+DEFINE DOWNCONVERTSIZE(convertTo) {
+ CASE convertTo OF
+ _MM_DOWNCONV_PS_NONE:
+ RETURN 4
+ _MM_DOWNCONV_PS_FLOAT16:
+ RETURN 2
+ _MM_DOWNCONV_PS_UINT8:
+ RETURN 1
+ _MM_DOWNCONV_PS_SINT8:
+ RETURN 1
+ _MM_DOWNCONV_PS_UINT16:
+ RETURN 2
+ _MM_DOWNCONV_PS_SINT16:
+ RETURN 2
+ ESAC
+}
+storeOffset := 0
+foundNext64BytesBoundary := false
+downSize := DOWNCONVERTSIZE(conv)
+addr := mt-64
+FOR j := 0 to 15
+ IF foundNext64BytesBoundary == false
+ IF ((addr + (storeOffset + 1)*downSize) % 64) == 0
+ foundNext64BytesBoundary := true
+ FI
+ ELSE
+ i := j*32
+ tmp := DOWNCONVERT(v1[i+31:i], conv)
+ storeAddr := addr + storeOffset * downSize
+ CASE downSize OF
+ 4: MEM[storeAddr] := tmp[31:0]
+ 2: MEM[storeAddr] := tmp[15:0]
+ 1: MEM[storeAddr] := tmp[7:0]
+ ESAC
+ FI
+ storeOffset := storeOffset + 1
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPACKSTOREHPS" form="m512, zmm"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_mask_extpackstorehi_ps">
+ <type>Floating Point</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void *" varname="mt" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="v1" etype="FP32"/>
+ <parameter type="_MM_DOWNCONV_PS_ENUM" varname="conv" etype="IMM" immtype="_MM_DOWNCONV_PS"/>
+ <parameter type="int" varname="hint" etype="UI32" hint="TRUE"/>
+ <description>Down-converts and stores packed single-precision (32-bit) floating-point elements of "v1" into a byte/word/doubleword stream according to "conv" at a logically mapped starting address (mt-64), storing the high-64-byte elements of that stream (those elements of the stream that map at or after the first 64-byte-aligned address following (mt-64)). "hint" indicates to the processor whether the data is non-temporal. Elements are stored to memory according to element selector "k" (elements are skipped when the corresponding mask bit is not set).</description>
+ <operation>DEFINE DOWNCONVERT(element, convertTo) {
+ CASE convertTo OF
+ _MM_DOWNCONV_PS_NONE:
+ RETURN element[31:0]
+ _MM_DOWNCONV_PS_FLOAT16:
+ RETURN Convert_FP32_To_FP16(element[31:0])
+ _MM_DOWNCONV_PS_UINT8:
+ RETURN Truncate8(element[31:0])
+ _MM_DOWNCONV_PS_SINT8:
+ RETURN Saturate8(element[31:0])
+ _MM_DOWNCONV_PS_UINT16:
+ RETURN Truncate16(element[31:0])
+ _MM_DOWNCONV_PS_SINT16:
+ RETURN Saturate16(element[31:0])
+ ESAC
+}
+DEFINE DOWNCONVERTSIZE(convertTo) {
+ CASE convertTo OF
+ _MM_DOWNCONV_PS_NONE:
+ RETURN 4
+ _MM_DOWNCONV_PS_FLOAT16:
+ RETURN 2
+ _MM_DOWNCONV_PS_UINT8:
+ RETURN 1
+ _MM_DOWNCONV_PS_SINT8:
+ RETURN 1
+ _MM_DOWNCONV_PS_UINT16:
+ RETURN 2
+ _MM_DOWNCONV_PS_SINT16:
+ RETURN 2
+ ESAC
+}
+storeOffset := 0
+foundNext64BytesBoundary := false
+downSize := DOWNCONVERTSIZE(conv)
+addr := mt-64
+FOR j := 0 to 15
+ IF k[j]
+ IF foundNext64BytesBoundary == false
+ IF ((addr + (storeOffset + 1)*downSize) % 64) == 0
+ foundNext64BytesBoundary := true
+ FI
+ ELSE
+ i := j*32
+ tmp := DOWNCONVERT(v1[i+31:i], conv)
+ storeAddr := addr + storeOffset * downSize
+ CASE downSize OF
+ 4: MEM[storeAddr] := tmp[31:0]
+ 2: MEM[storeAddr] := tmp[15:0]
+ 1: MEM[storeAddr] := tmp[7:0]
+ ESAC
+ FI
+ storeOffset := storeOffset + 1
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPACKSTOREHPS" form="m512 {k}, zmm"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_extpackstorelo_ps">
+ <type>Floating Point</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void *" varname="mt" etype="FP32"/>
+ <parameter type="__m512" varname="v1" etype="FP32"/>
+ <parameter type="_MM_DOWNCONV_PS_ENUM" varname="conv" etype="IMM" immtype="_MM_DOWNCONV_PS"/>
+ <parameter type="int" varname="hint" etype="UI32" hint="TRUE"/>
+  <description>Down-converts and stores packed single-precision (32-bit) floating-point elements of "v1" into a byte/word/doubleword stream according to "conv" at a logically mapped starting address "mt", storing the low-64-byte elements of that stream (those elements of the stream that map before the first 64-byte-aligned address following "mt"). "hint" indicates to the processor whether the data is non-temporal.</description>
+ <operation>
+DEFINE DOWNCONVERT(element, convertTo) {
+ CASE convertTo OF
+ _MM_UPCONV_PS_NONE:
+ RETURN element[31:0]
+ _MM_UPCONV_PS_FLOAT16:
+ RETURN Convert_FP32_To_FP16(element[31:0])
+ _MM_UPCONV_PS_UINT8:
+ RETURN Truncate8(element[31:0])
+ _MM_UPCONV_PS_SINT8:
+ RETURN Saturate8(element[31:0])
+ _MM_UPCONV_PS_UINT16:
+ RETURN Truncate16(element[31:0])
+ _MM_UPCONV_PS_SINT16:
+ RETURN Saturate16(element[31:0])
+ ESAC
+}
+DEFINE DOWNCONVERTSIZE(convertTo) {
+ CASE convertTo OF
+ _MM_UPCONV_PS_NONE:
+ RETURN 4
+ _MM_UPCONV_PS_FLOAT16:
+ RETURN 2
+ _MM_UPCONV_PS_UINT8:
+ RETURN 1
+ _MM_UPCONV_PS_SINT8:
+ RETURN 1
+ _MM_UPCONV_PS_UINT16:
+ RETURN 2
+ _MM_UPCONV_PS_SINT16:
+ RETURN 2
+ ESAC
+}
+storeOffset := 0
+downSize := DOWNCONVERTSIZE(conv)
+addr := mt
+FOR j := 0 to 15
+ i := j*32
+ tmp := DOWNCONVERT(v1[i+31:i], conv)
+ storeAddr := addr + storeOffset * downSize
+ CASE downSize OF
+ 4: MEM[storeAddr] := tmp[31:0]
+ 2: MEM[storeAddr] := tmp[15:0]
+ 1: MEM[storeAddr] := tmp[7:0]
+ ESAC
+ storeOffset := storeOffset + 1
+ IF ((addr + storeOffset * downSize) % 64) == 0
+ BREAK
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPACKSTORELPS" form="m512, zmm"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_mask_extpackstorelo_ps">
+ <type>Floating Point</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void *" varname="mt" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="v1" etype="FP32"/>
+ <parameter type="_MM_DOWNCONV_PS_ENUM" varname="conv" etype="IMM" immtype="_MM_DOWNCONV_PS"/>
+ <parameter type="int" varname="hint" etype="UI32" hint="TRUE"/>
+  <description>Down-converts and stores packed single-precision (32-bit) floating-point elements of "v1" into a byte/word/doubleword stream according to "conv" at a logically mapped starting address "mt", storing the low-64-byte elements of that stream (those elements of the stream that map before the first 64-byte-aligned address following "mt"). "hint" indicates to the processor whether the data is non-temporal. Elements are stored to memory according to element selector "k" (elements are skipped when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE DOWNCONVERT(element, convertTo) {
+ CASE convertTo OF
+ _MM_UPCONV_PS_NONE:
+ RETURN element[31:0]
+ _MM_UPCONV_PS_FLOAT16:
+ RETURN Convert_FP32_To_FP16(element[31:0])
+ _MM_UPCONV_PS_UINT8:
+ RETURN Truncate8(element[31:0])
+ _MM_UPCONV_PS_SINT8:
+ RETURN Saturate8(element[31:0])
+ _MM_UPCONV_PS_UINT16:
+ RETURN Truncate16(element[31:0])
+ _MM_UPCONV_PS_SINT16:
+ RETURN Saturate16(element[31:0])
+ ESAC
+}
+DEFINE DOWNCONVERTSIZE(convertTo) {
+ CASE convertTo OF
+ _MM_UPCONV_PS_NONE:
+ RETURN 4
+ _MM_UPCONV_PS_FLOAT16:
+ RETURN 2
+ _MM_UPCONV_PS_UINT8:
+ RETURN 1
+ _MM_UPCONV_PS_SINT8:
+ RETURN 1
+ _MM_UPCONV_PS_UINT16:
+ RETURN 2
+ _MM_UPCONV_PS_SINT16:
+ RETURN 2
+ ESAC
+}
+storeOffset := 0
+downSize := DOWNCONVERTSIZE(conv)
+addr := mt
+FOR j := 0 to 15
+ IF k[j]
+ i := j*32
+ tmp := DOWNCONVERT(v1[i+31:i], conv)
+ storeAddr := addr + storeOffset * downSize
+ CASE downSize OF
+ 4: MEM[storeAddr] := tmp[31:0]
+ 2: MEM[storeAddr] := tmp[15:0]
+ 1: MEM[storeAddr] := tmp[7:0]
+ ESAC
+ storeOffset := storeOffset + 1
+ IF ((addr + storeOffset * downSize) % 64) == 0
+ BREAK
+ FI
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPACKSTORELPS" form="m512 {k}, zmm"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_extpackstorehi_pd">
+ <type>Floating Point</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void *" varname="mt" etype="FP64"/>
+ <parameter type="__m512d" varname="v1" etype="FP64"/>
+ <parameter type="_MM_DOWNCONV_PD_ENUM" varname="conv" etype="IMM" immtype="_MM_DOWNCONV_PD"/>
+ <parameter type="int" varname="hint" etype="UI32" hint="TRUE"/>
+  <description>Down-converts and stores packed double-precision (64-bit) floating-point elements of "v1" into a quadword stream according to "conv" at a logically mapped starting address (mt-64), storing the high-64-byte elements of that stream (those elements of the stream that map at or after the first 64-byte-aligned address following (mt-64)). "hint" indicates to the processor whether the data is non-temporal.</description>
+ <operation>DEFINE DOWNCONVERT(element, convertTo) {
+ CASE convertTo OF
+ _MM_UPCONV_PD_NONE:
+ RETURN element[63:0]
+ ESAC
+}
+DEFINE DOWNCONVERTSIZE(convertTo) {
+ CASE convertTo OF
+ _MM_UPCONV_PD_NONE:
+ RETURN 8
+ ESAC
+}
+storeOffset := 0
+foundNext64BytesBoundary := false
+downSize := DOWNCONVERTSIZE(conv)
+addr := mt-64
+FOR j := 0 to 7
+ IF foundNext64BytesBoundary == false
+ IF ((addr + (storeOffset + 1)*downSize) % 64) == 0
+ foundNext64BytesBoundary := true
+ FI
+ ELSE
+ i := j*64
+ tmp := DOWNCONVERT(v1[i+63:i], conv)
+ storeAddr := addr + storeOffset * downSize
+ CASE downSize OF
+ 8: MEM[storeAddr] := tmp[63:0]
+ ESAC
+ FI
+ storeOffset := storeOffset + 1
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPACKSTOREHPD" form="m512, zmm"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_mask_extpackstorehi_pd">
+ <type>Floating Point</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void *" varname="mt" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="v1" etype="FP64"/>
+ <parameter type="_MM_DOWNCONV_PD_ENUM" varname="conv" etype="IMM" immtype="_MM_DOWNCONV_PD"/>
+ <parameter type="int" varname="hint" etype="UI32" hint="TRUE"/>
+  <description>Down-converts and stores packed double-precision (64-bit) floating-point elements of "v1" into a quadword stream according to "conv" at a logically mapped starting address (mt-64), storing the high-64-byte elements of that stream (those elements of the stream that map at or after the first 64-byte-aligned address following (mt-64)). "hint" indicates to the processor whether the data is non-temporal. Elements are stored to memory according to element selector "k" (elements are skipped when the corresponding mask bit is not set).</description>
+ <operation>DEFINE DOWNCONVERT(element, convertTo) {
+ CASE convertTo OF
+ _MM_UPCONV_PD_NONE:
+ RETURN element[63:0]
+ ESAC
+}
+DEFINE DOWNCONVERTSIZE(convertTo) {
+ CASE convertTo OF
+ _MM_UPCONV_PD_NONE:
+ RETURN 8
+ ESAC
+}
+storeOffset := 0
+foundNext64BytesBoundary := false
+downSize := DOWNCONVERTSIZE(conv)
+addr := mt-64
+FOR j := 0 to 7
+ IF k[j]
+ IF foundNext64BytesBoundary == false
+ IF ((addr + (storeOffset + 1)*downSize) % 64) == 0
+ foundNext64BytesBoundary := true
+ FI
+ ELSE
+ i := j*64
+ tmp := DOWNCONVERT(v1[i+63:i], conv)
+ storeAddr := addr + storeOffset * downSize
+ CASE downSize OF
+ 8: MEM[storeAddr] := tmp[63:0]
+ ESAC
+ FI
+ storeOffset := storeOffset + 1
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPACKSTOREHPD" form="m512 {k}, zmm"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_extpackstorelo_pd">
+ <type>Floating Point</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void *" varname="mt" etype="FP64"/>
+ <parameter type="__m512d" varname="v1" etype="FP64"/>
+ <parameter type="_MM_DOWNCONV_PD_ENUM" varname="conv" etype="IMM" immtype="_MM_DOWNCONV_PD"/>
+ <parameter type="int" varname="hint" etype="UI32" hint="TRUE"/>
+  <description>Down-converts and stores packed double-precision (64-bit) floating-point elements of "v1" into a quadword stream according to "conv" at a logically mapped starting address "mt", storing the low-64-byte elements of that stream (those elements of the stream that map before the first 64-byte-aligned address following "mt"). "hint" indicates to the processor whether the data is non-temporal.</description>
+ <operation>
+DEFINE DOWNCONVERT(element, convertTo) {
+ CASE convertTo OF
+ _MM_UPCONV_PD_NONE:
+ RETURN element[63:0]
+ ESAC
+}
+DEFINE DOWNCONVERTSIZE(convertTo) {
+ CASE convertTo OF
+ _MM_UPCONV_PD_NONE:
+ RETURN 8
+ ESAC
+}
+storeOffset := 0
+downSize := DOWNCONVERTSIZE(conv)
+addr := mt
+FOR j := 0 to 7
+  i := j*64
+ tmp := DOWNCONVERT(v1[i+63:i], conv)
+ storeAddr := addr + storeOffset * downSize
+ CASE downSize OF
+ 8: MEM[storeAddr] := tmp[63:0]
+ ESAC
+ storeOffset := storeOffset + 1
+ IF ((addr + storeOffset * downSize) % 64) == 0
+ BREAK
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPACKSTORELPD" form="m512, zmm"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_mask_extpackstorelo_pd">
+ <type>Floating Point</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void *" varname="mt" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="v1" etype="FP64"/>
+ <parameter type="_MM_DOWNCONV_PD_ENUM" varname="conv" etype="IMM" immtype="_MM_DOWNCONV_PD"/>
+ <parameter type="int" varname="hint" etype="UI32" hint="TRUE"/>
+  <description>Down-converts and stores packed double-precision (64-bit) floating-point elements of "v1" into a quadword stream according to "conv" at a logically mapped starting address "mt", storing the low-64-byte elements of that stream (those elements of the stream that map before the first 64-byte-aligned address following "mt"). "hint" indicates to the processor whether the data is non-temporal. Elements are stored to memory according to element selector "k" (elements are skipped when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE DOWNCONVERT(element, convertTo) {
+ CASE convertTo OF
+ _MM_UPCONV_PD_NONE:
+ RETURN element[63:0]
+ ESAC
+}
+DEFINE DOWNCONVERTSIZE(convertTo) {
+ CASE convertTo OF
+ _MM_UPCONV_PD_NONE:
+ RETURN 8
+ ESAC
+}
+storeOffset := 0
+downSize := DOWNCONVERTSIZE(conv)
+addr := mt
+FOR j := 0 to 7
+ IF k[j]
+    i := j*64
+ tmp := DOWNCONVERT(v1[i+63:i], conv)
+ storeAddr := addr + storeOffset * downSize
+ CASE downSize OF
+ 8: MEM[storeAddr] := tmp[63:0]
+ ESAC
+ storeOffset := storeOffset + 1
+ IF ((addr + storeOffset * downSize) % 64) == 0
+ BREAK
+ FI
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPACKSTORELPD" form="m512 {k}, zmm"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_i32loscatter_epi64">
+ <type>Integer</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="base_addr" etype="UI64"/>
+ <parameter type="__m512i" varname="vindex" etype="SI32"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <description>Stores 8 packed 64-bit integer elements located in "a" and stores them in memory locations starting at location "base_addr" at packed 32-bit integer indices stored in "vindex" scaled by "scale".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ m := j*32
+ addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
+ MEM[addr+63:addr] := a[i+63:i]
+ENDFOR
+ </operation>
+ <instruction name="VPSCATTERDQ" form="m512, zmm" xed="VPSCATTERDQ_MEMu64_MASKmskw_ZMMu64_AVX512_VL512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_mask_i32loscatter_epi64">
+ <type>Integer</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="base_addr" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="vindex" etype="SI32"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <description>Stores 8 packed 64-bit integer elements located in "a" and stores them in memory locations starting at location "base_addr" at packed 32-bit integer indices stored in "vindex" scaled by "scale" using writemask "k" (elements whose corresponding mask bit is not set are not written to memory).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ m := j*32
+ IF k[j]
+ addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
+ MEM[addr+63:addr] := a[i+63:i]
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VPSCATTERDQ" form="m512 {k}, zmm" xed="VPSCATTERDQ_MEMu64_MASKmskw_ZMMu64_AVX512_VL512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_loadunpackhi_epi32">
+ <type>Integer</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Load</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="src" etype="UI32"/>
+ <parameter type="void const*" varname="mt" etype="UI32" memwidth="512"/>
+ <description>Loads the high-64-byte-aligned portion of the byte/word/doubleword stream starting at element-aligned address mt-64 and expands them into packed 32-bit integers in "dst". The initial values of "dst" are copied from "src". Only those converted doublewords that occur at or after the first 64-byte-aligned address following (mt-64) are loaded. Elements in the resulting vector that do not map to those doublewords are taken from "src".</description>
+ <operation>dst[511:0] := src[511:0]
+loadOffset := 0
+foundNext64BytesBoundary := false
+addr := mt-64
+FOR j := 0 to 15
+ IF foundNext64BytesBoundary == false
+    IF ((addr + (loadOffset + 1)*4) % 64) == 0
+ foundNext64BytesBoundary := true
+ FI
+ ELSE
+ i := j*32
+ tmp := MEM[addr + loadOffset*4]
+ dst[i+31:i] := tmp[i+31:i]
+ FI
+ loadOffset := loadOffset + 1
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VLOADUNPACKHD" form="zmm, m512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_mask_loadunpackhi_epi32">
+ <type>Integer</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Load</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="src" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="void const *" varname="mt" etype="UI32" memwidth="512"/>
+ <description>Loads the high-64-byte-aligned portion of the byte/word/doubleword stream starting at element-aligned address mt-64 and expands them into packed 32-bit integers in "dst". The initial values of "dst" are copied from "src". Only those converted doublewords that occur at or after the first 64-byte-aligned address following (mt-64) are loaded. Elements in the resulting vector that do not map to those doublewords are taken from "src". Elements are loaded from memory according to element selector "k" (elements are skipped when the corresponding mask bit is not set).</description>
+ <operation>dst[511:0] := src[511:0]
+loadOffset := 0
+foundNext64BytesBoundary := false
+addr := mt-64
+FOR j := 0 to 15
+ IF k[j]
+ IF foundNext64BytesBoundary == false
+      IF ((addr + (loadOffset + 1)*4) % 64) == 0
+ foundNext64BytesBoundary := true
+ FI
+ ELSE
+ i := j*32
+ tmp := MEM[addr + loadOffset*4]
+ dst[i+31:i] := tmp[i+31:i]
+ FI
+ loadOffset := loadOffset + 1
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VLOADUNPACKHD" form="zmm {k}, m512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_loadunpacklo_epi32">
+ <type>Integer</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Load</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="src" etype="UI32"/>
+ <parameter type="void const*" varname="mt" etype="UI32" memwidth="512"/>
+ <description>Loads the low-64-byte-aligned portion of the byte/word/doubleword stream starting at element-aligned address mt and expanded into packed 32-bit integers in "dst". The initial values of "dst" are copied from "src". Only those converted doublewords that occur before first 64-byte-aligned address following "mt" are loaded. Elements in the resulting vector that do not map to those doublewords are taken from "src".</description>
+ <operation>
+dst[511:0] := src[511:0]
+loadOffset := 0
+addr := mt
+FOR j := 0 to 15
+ i := j*32
+ tmp := MEM[addr + loadOffset*4]
+ dst[i+31:i] := tmp[i+31:i]
+ loadOffset := loadOffset + 1
+ IF (mt + loadOffset * 4) % 64 == 0
+ BREAK
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VLOADUNPACKLD" form="zmm, m512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_mask_loadunpacklo_epi32">
+ <type>Integer</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Load</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="src" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="void const*" varname="mt" etype="UI32" memwidth="512"/>
+ <description>Loads the low-64-byte-aligned portion of the byte/word/doubleword stream starting at element-aligned address mt and expands them into packed 32-bit integers in "dst". The initial values of "dst" are copied from "src". Only those converted doublewords that occur before first 64-byte-aligned address following "mt" are loaded. Elements in the resulting vector that do not map to those doublewords are taken from "src". Elements are loaded from memory according to element selector "k" (elements are skipped when the corresponding mask bit is not set).</description>
+ <operation>
+dst[511:0] := src[511:0]
+loadOffset := 0
+addr := mt
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ tmp := MEM[addr + loadOffset*4]
+ dst[i+31:i] := tmp[i+31:i]
+ loadOffset := loadOffset + 1
+ IF (mt + loadOffset * 4) % 64 == 0
+ BREAK
+ FI
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VLOADUNPACKLD" form="zmm {k}, m512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_loadunpackhi_epi64">
+ <type>Integer</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Load</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="src" etype="UI64"/>
+ <parameter type="void const*" varname="mt" etype="UI64" memwidth="512"/>
+ <description>Loads the high-64-byte-aligned portion of the quadword stream starting at element-aligned address mt-64 and expands them into packed 64-bit integers in "dst". The initial values of "dst" are copied from "src". Only those converted quadwords that occur at or after the first 64-byte-aligned address following (mt-64) are loaded. Elements in the resulting vector that do not map to those quadwords are taken from "src".</description>
+ <operation>dst[511:0] := src[511:0]
+loadOffset := 0
+foundNext64BytesBoundary := false
+addr := mt-64
+FOR j := 0 to 7
+ IF foundNext64BytesBoundary == false
+    IF ((addr + (loadOffset + 1)*8) % 64) == 0
+ foundNext64BytesBoundary := true
+ FI
+ ELSE
+ i := j*64
+ tmp := MEM[addr + loadOffset*8]
+ dst[i+63:i] := tmp[i+63:i]
+ FI
+ loadOffset := loadOffset + 1
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VLOADUNPACKHQ" form="zmm, m512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_mask_loadunpackhi_epi64">
+ <type>Integer</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Load</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="void const*" varname="mt" etype="UI64" memwidth="512"/>
+ <description>Loads the high-64-byte-aligned portion of the quadword stream starting at element-aligned address mt-64 and expands them into packed 64-bit integers in "dst". The initial values of "dst" are copied from "src". Only those converted quadwords that occur at or after the first 64-byte-aligned address following (mt-64) are loaded. Elements in the resulting vector that do not map to those quadwords are taken from "src". Elements are loaded from memory according to element selector "k" (elements are skipped when the corresponding mask bit is not set).</description>
+ <operation>dst[511:0] := src[511:0]
+loadOffset := 0
+foundNext64BytesBoundary := false
+addr := mt-64
+FOR j := 0 to 7
+ IF k[j]
+ IF foundNext64BytesBoundary == false
+      IF ((addr + (loadOffset + 1)*8) % 64) == 0
+ foundNext64BytesBoundary := true
+ FI
+ ELSE
+ i := j*64
+ tmp := MEM[addr + loadOffset*8]
+ dst[i+63:i] := tmp[i+63:i]
+ FI
+ loadOffset := loadOffset + 1
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VLOADUNPACKHQ" form="zmm {k}, m512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_loadunpacklo_epi64">
+ <type>Integer</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Load</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="src" etype="UI64"/>
+ <parameter type="void const*" varname="mt" etype="UI64" memwidth="512"/>
+ <description>Loads the low-64-byte-aligned portion of the quadword stream starting at element-aligned address mt and expands them into packed 64-bit integers in "dst". The initial values of "dst" are copied from "src". Only those converted quad that occur before first 64-byte-aligned address following "mt" are loaded. Elements in the resulting vector that do not map to those quadwords are taken from "src".</description>
+ <operation>
+dst[511:0] := src[511:0]
+loadOffset := 0
+addr := mt
+FOR j := 0 to 7
+ i := j*64
+ tmp := MEM[addr + loadOffset*8]
+ dst[i+63:i] := tmp[i+63:i]
+ loadOffset := loadOffset + 1
+  IF ((addr + loadOffset*8) % 64) == 0
+ BREAK
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VLOADUNPACKLQ" form="zmm, m512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_mask_loadunpacklo_epi64">
+ <type>Integer</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Load</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="void const*" varname="mt" etype="UI64" memwidth="512"/>
+ <description>Loads the low-64-byte-aligned portion of the quadword stream starting at element-aligned address mt and expands them into packed 64-bit integers in "dst". The initial values of "dst" are copied from "src". Only those converted quad that occur before first 64-byte-aligned address following "mt" are loaded. Elements in the resulting vector that do not map to those quadwords are taken from "src". Elements are loaded from memory according to element selector "k" (elements are skipped when the corresponding mask bit is not set).</description>
+ <operation>
+dst[511:0] := src[511:0]
+loadOffset := 0
+addr := mt
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ tmp := MEM[addr + loadOffset*8]
+ dst[i+63:i] := tmp[i+63:i]
+ loadOffset := loadOffset + 1
+    IF ((addr + loadOffset*8) % 64) == 0
+ BREAK
+ FI
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VLOADUNPACKLQ" form="zmm {k}, m512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_loadunpackhi_ps">
+ <type>Floating Point</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Load</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="void const*" varname="mt" etype="FP32" memwidth="512"/>
+  <description>Loads the high-64-byte-aligned portion of the byte/word/doubleword stream starting at element-aligned address mt-64 and expands them into packed single-precision (32-bit) floating-point elements in "dst". The initial values of "dst" are copied from "src". Only those converted doublewords that occur at or after the first 64-byte-aligned address following (mt-64) are loaded. Elements in the resulting vector that do not map to those doublewords are taken from "src".</description>
+ <operation>dst[511:0] := src[511:0]
+loadOffset := 0
+foundNext64BytesBoundary := false
+addr := mt-64
+FOR j := 0 to 15
+ IF foundNext64BytesBoundary == false
+    IF ((addr + (loadOffset + 1)*4) % 64) == 0
+ foundNext64BytesBoundary := true
+ FI
+ ELSE
+ i := j*32
+ tmp := MEM[addr + loadOffset*4]
+ dst[i+31:i] := tmp[i+31:i]
+ FI
+ loadOffset := loadOffset + 1
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VLOADUNPACKHPS" form="zmm, m512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_mask_loadunpackhi_ps">
+ <type>Floating Point</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Load</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="void const*" varname="mt" etype="FP32" memwidth="512"/>
+  <description>Loads the high-64-byte-aligned portion of the doubleword stream starting at element-aligned address mt-64 and expands them into packed single-precision (32-bit) floating-point elements in "dst". The initial values of "dst" are copied from "src". Only those converted doublewords that occur at or after the first 64-byte-aligned address following (mt-64) are loaded. Elements in the resulting vector that do not map to those doublewords are taken from "src". Elements are loaded from memory according to element selector "k" (elements are skipped when the corresponding mask bit is not set).</description>
+ <operation>dst[511:0] := src[511:0]
+loadOffset := 0
+foundNext64BytesBoundary := false
+addr := mt-64
+FOR j := 0 to 15
+ IF k[j]
+ IF foundNext64BytesBoundary == false
+      IF ((addr + (loadOffset + 1)*4) % 64) == 0
+ foundNext64BytesBoundary := true
+ FI
+ ELSE
+ i := j*32
+ tmp := MEM[addr + loadOffset*4]
+ dst[i+31:i] := tmp[i+31:i]
+ FI
+ loadOffset := loadOffset + 1
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VLOADUNPACKHPS" form="zmm {k}, m512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_loadunpacklo_ps">
+ <type>Floating Point</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Load</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="void const*" varname="mt" etype="FP32" memwidth="512"/>
+ <description>Loads the low-64-byte-aligned portion of the doubleword stream starting at element-aligned address mt and expanded into packed single-precision (32-bit) floating-point elements in "dst". The initial values of "dst" are copied from "src". Only those converted doublewords that occur before first 64-byte-aligned address following "mt" are loaded. Elements in the resulting vector that do not map to those doublewords are taken from "src".</description>
+ <operation>
+dst[511:0] := src[511:0]
+loadOffset := 0
+addr := mt
+FOR j := 0 to 15
+ i := j*32
+ tmp := MEM[addr + loadOffset*4]
+ dst[i+31:i] := tmp[i+31:i]
+ loadOffset := loadOffset + 1
+ IF (mt + loadOffset * 4) % 64 == 0
+ BREAK
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VLOADUNPACKLPS" form="zmm, m512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_mask_loadunpacklo_ps">
+ <type>Floating Point</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Load</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="void const*" varname="mt" etype="FP32" memwidth="512"/>
+ <description>Loads the low-64-byte-aligned portion of the doubleword stream starting at element-aligned address mt and expanded into packed single-precision (32-bit) floating-point elements in "dst". The initial values of "dst" are copied from "src". Only those converted doublewords that occur before first 64-byte-aligned address following "mt" are loaded. Elements in the resulting vector that do not map to those doublewords are taken from "src". Elements are loaded from memory according to element selector "k" (elements are skipped when the corresponding mask bit is not set).</description>
+ <operation>
+dst[511:0] := src[511:0]
+loadOffset := 0
+addr := mt
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ tmp := MEM[addr + loadOffset*4]
+ dst[i+31:i] := tmp[i+31:i]
+ loadOffset := loadOffset + 1
+ IF (mt + loadOffset * 4) % 64 == 0
+ BREAK
+ FI
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VLOADUNPACKLPS" form="zmm {k}, m512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_loadunpackhi_pd">
+ <type>Floating Point</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Load</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="void const*" varname="mt" etype="FP64" memwidth="512"/>
+ <description>Loads the high-64-byte-aligned portion of the quadword stream starting at element-aligned address mt-64 and expands them into packed double-precision (64-bit) floating-point values in "dst". The initial values of "dst" are copied from "src". Only those converted quadwords that occur at or after the first 64-byte-aligned address following (mt-64) are loaded. Elements in the resulting vector that do not map to those quadwords are taken from "src".</description>
+ <operation>dst[511:0] := src[511:0]
+loadOffset := 0
+foundNext64BytesBoundary := false
+addr := mt-64
+FOR j := 0 to 7
+ IF foundNext64BytesBoundary == false
+ IF (addr + (loadOffset + 1)*8) % 64 == 0
+ foundNext64BytesBoundary := true
+ FI
+ ELSE
+ i := j*64
+ tmp := MEM[addr + loadOffset*8]
+ dst[i+63:i] := tmp[i+63:i]
+ FI
+ loadOffset := loadOffset + 1
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VLOADUNPACKHPD" form="zmm, m512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_mask_loadunpackhi_pd">
+ <type>Floating Point</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Load</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="void const*" varname="mt" etype="FP64" memwidth="512"/>
+ <description>Loads the high-64-byte-aligned portion of the quadword stream starting at element-aligned address mt-64 and expands them into packed double-precision (64-bit) floating-point values in "dst". The initial values of "dst" are copied from "src". Only those converted quadwords that occur at or after the first 64-byte-aligned address following (mt-64) are loaded. Elements in the resulting vector that do not map to those quadwords are taken from "src". Elements are loaded from memory according to element selector "k" (elements are skipped when the corresponding mask bit is not set).</description>
+ <operation>dst[511:0] := src[511:0]
+loadOffset := 0
+foundNext64BytesBoundary := false
+addr := mt-64
+FOR j := 0 to 7
+ IF k[j]
+ IF foundNext64BytesBoundary == false
+ IF (addr + (loadOffset + 1)*8) % 64 == 0
+ foundNext64BytesBoundary := true
+ FI
+ ELSE
+ i := j*64
+ tmp := MEM[addr + loadOffset*8]
+ dst[i+63:i] := tmp[i+63:i]
+ FI
+ loadOffset := loadOffset + 1
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VLOADUNPACKHPD" form="zmm {k}, m512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_loadunpacklo_pd">
+ <type>Floating Point</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Load</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="void const*" varname="mt" etype="FP64" memwidth="512"/>
+ <description>Loads the low-64-byte-aligned portion of the quadword stream starting at element-aligned address mt and expands them into packed double-precision (64-bit) floating-point elements in "dst". The initial values of "dst" are copied from "src". Only those converted quadwords that occur before the first 64-byte-aligned address following "mt" are loaded. Elements in the resulting vector that do not map to those quadwords are taken from "src".</description>
+ <operation>
+dst[511:0] := src[511:0]
+loadOffset := 0
+addr := mt
+FOR j := 0 to 7
+ i := j*64
+ tmp := MEM[addr + loadOffset*8]
+ dst[i+63:i] := tmp[i+63:i]
+ loadOffset := loadOffset + 1
+ IF ((addr + 8*loadOffset) % 64) == 0
+ BREAK
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VLOADUNPACKLPD" form="zmm, m512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_mask_loadunpacklo_pd">
+ <type>Floating Point</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Load</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="void const*" varname="mt" etype="FP64" memwidth="512"/>
+ <description>Loads the low-64-byte-aligned portion of the quadword stream starting at element-aligned address mt and expands them into packed double-precision (64-bit) floating-point values in "dst". The initial values of "dst" are copied from "src". Only those converted quadwords that occur before the first 64-byte-aligned address following "mt" are loaded. Elements in the resulting vector that do not map to those quadwords are taken from "src". Elements are loaded from memory according to element selector "k" (elements are skipped when the corresponding mask bit is not set).</description>
+ <operation>
+dst[511:0] := src[511:0]
+loadOffset := 0
+addr := mt
+FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ tmp := MEM[addr + loadOffset*8]
+ dst[i+63:i] := tmp[i+63:i]
+ loadOffset := loadOffset + 1
+ IF ((addr + 8*loadOffset) % 64) == 0
+ BREAK
+ FI
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VLOADUNPACKLPD" form="zmm {k}, m512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_packstorehi_epi32">
+ <type>Integer</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="mt" etype="UI32" memwidth="512"/>
+ <parameter type="__m512i" varname="v1" etype="UI32"/>
+ <description>Stores packed 32-bit integer elements of "v1" into a doubleword stream at a logically mapped starting address (mt-64), storing the high-64-byte elements of that stream (those elements of the stream that map at or after the first 64-byte-aligned address following (mt-64)).</description>
+ <operation>
+storeOffset := 0
+foundNext64BytesBoundary := 0
+addr := mt-64
+FOR j := 0 to 15
+ IF foundNext64BytesBoundary == 0
+ IF ((addr + (storeOffset + 1)*4) % 64) == 0
+ foundNext64BytesBoundary := 1
+ FI
+ ELSE
+ i := j*32
+ MEM[addr + storeOffset*4] := v1[i+31:i]
+ FI
+ storeOffset := storeOffset + 1
+ENDFOR
+ </operation>
+ <instruction name="VPACKSTOREHD" form="m512, zmm"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_mask_packstorehi_epi32">
+ <type>Integer</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="mt" etype="UI32" memwidth="512"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="v1" etype="UI32"/>
+ <description>Stores packed 32-bit integer elements of "v1" into a doubleword stream at a logically mapped starting address (mt-64), storing the high-64-byte elements of that stream (those elements of the stream that map at or after the first 64-byte-aligned address following (mt-64)). Elements are stored to memory according to element selector "k" (elements are skipped when the corresponding mask bit is not set).</description>
+ <operation>
+storeOffset := 0
+foundNext64BytesBoundary := 0
+addr := mt-64
+FOR j := 0 to 15
+ IF k[j]
+ IF foundNext64BytesBoundary == 0
+ IF ((addr + (storeOffset + 1)*4) % 64) == 0
+ foundNext64BytesBoundary := 1
+ FI
+ ELSE
+ i := j*32
+ MEM[addr + storeOffset*4] := v1[i+31:i]
+ FI
+ storeOffset := storeOffset + 1
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VPACKSTOREHD" form="m512 {k}, zmm"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_packstorelo_epi32">
+ <type>Integer</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="mt" etype="UI32" memwidth="512"/>
+ <parameter type="__m512i" varname="v1" etype="UI32"/>
+ <description>Stores packed 32-bit integer elements of "v1" into a doubleword stream at a logically mapped starting address "mt", storing the low-64-byte elements of that stream (those elements of the stream that map before the first 64-byte-aligned address following "mt").</description>
+ <operation>
+storeOffset := 0
+addr := mt
+FOR j := 0 to 15
+ i := j*32
+ MEM[addr + storeOffset*4] := v1[i+31:i]
+ storeOffset := storeOffset + 1
+ IF ((addr + storeOffset*4) % 64) == 0
+ BREAK
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VPACKSTORELD" form="m512, zmm"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_mask_packstorelo_epi32">
+ <type>Integer</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="mt" etype="UI32" memwidth="512"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="v1" etype="UI32"/>
+ <description>Stores packed 32-bit integer elements of "v1" into a doubleword stream at a logically mapped starting address "mt", storing the low-64-byte elements of that stream (those elements of the stream that map before the first 64-byte-aligned address following "mt"). Elements are stored to memory according to element selector "k" (elements are skipped when the corresponding mask bit is not set).</description>
+ <operation>
+storeOffset := 0
+addr := mt
+FOR j := 0 to 15
+ IF k[j]
+ i := j*32
+ MEM[addr + storeOffset*4] := v1[i+31:i]
+ storeOffset := storeOffset + 1
+ IF ((addr + storeOffset*4) % 64) == 0
+ BREAK
+ FI
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VPACKSTORELD" form="m512 {k}, zmm"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_packstorehi_epi64">
+ <type>Integer</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="mt" etype="UI64" memwidth="512"/>
+ <parameter type="__m512i" varname="v1" etype="UI64"/>
+ <description>Stores packed 64-bit integer elements of "v1" into a quadword stream at a logically mapped starting address (mt-64), storing the high-64-byte elements of that stream (those elements of the stream that map at or after the first 64-byte-aligned address following (mt-64)).</description>
+ <operation>
+storeOffset := 0
+foundNext64BytesBoundary := 0
+addr := mt-64
+FOR j := 0 to 7
+ IF foundNext64BytesBoundary == 0
+ IF ((addr + (storeOffset + 1)*8) % 64) == 0
+ foundNext64BytesBoundary := 1
+ FI
+ ELSE
+ i := j*64
+ MEM[addr + storeOffset*8] := v1[i+63:i]
+ FI
+ storeOffset := storeOffset + 1
+ENDFOR
+ </operation>
+ <instruction name="VPACKSTOREHQ" form="m512, zmm"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_mask_packstorehi_epi64">
+ <type>Integer</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="mt" etype="UI64" memwidth="512"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="v1" etype="UI64"/>
+ <description>Stores packed 64-bit integer elements of "v1" into a quadword stream at a logically mapped starting address (mt-64), storing the high-64-byte elements of that stream (those elements of the stream that map at or after the first 64-byte-aligned address following (mt-64)). Elements are stored to memory according to element selector "k" (elements are skipped when the corresponding mask bit is not set).</description>
+ <operation>
+storeOffset := 0
+foundNext64BytesBoundary := 0
+addr := mt-64
+FOR j := 0 to 7
+ IF k[j]
+ IF foundNext64BytesBoundary == 0
+ IF ((addr + (storeOffset + 1)*8) % 64) == 0
+ foundNext64BytesBoundary := 1
+ FI
+ ELSE
+ i := j*64
+ MEM[addr + storeOffset*8] := v1[i+63:i]
+ FI
+ storeOffset := storeOffset + 1
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VPACKSTOREHQ" form="m512 {k}, zmm"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_packstorelo_epi64">
+ <type>Integer</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="mt" etype="UI64" memwidth="512"/>
+ <parameter type="__m512i" varname="v1" etype="UI64"/>
+ <description>Stores packed 64-bit integer elements of "v1" into a quadword stream at a logically mapped starting address "mt", storing the low-64-byte elements of that stream (those elements of the stream that map before the first 64-byte-aligned address following "mt").</description>
+ <operation>
+storeOffset := 0
+addr := mt
+FOR j := 0 to 7
+ i := j*64
+ MEM[addr + storeOffset*8] := v1[i+63:i]
+ storeOffset := storeOffset + 1
+ IF ((addr + storeOffset*8) % 64) == 0
+ BREAK
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VPACKSTORELQ" form="m512, zmm"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_mask_packstorelo_epi64">
+ <type>Integer</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="mt" etype="UI64" memwidth="512"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="v1" etype="UI64"/>
+ <description>Stores packed 64-bit integer elements of "v1" into a quadword stream at a logically mapped starting address "mt", storing the low-64-byte elements of that stream (those elements of the stream that map before the first 64-byte-aligned address following "mt"). Elements are stored to memory according to element selector "k" (elements are skipped when the corresponding mask bit is not set).</description>
+ <operation>
+storeOffset := 0
+addr := mt
+FOR j := 0 to 7
+ IF k[j]
+ i := j*64
+ MEM[addr + storeOffset*8] := v1[i+63:i]
+ storeOffset := storeOffset + 1
+ IF ((addr + storeOffset*8) % 64) == 0
+ BREAK
+ FI
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VPACKSTORELQ" form="m512 {k}, zmm"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_packstorehi_ps">
+ <type>Floating Point</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="mt" etype="FP32" memwidth="512"/>
+ <parameter type="__m512" varname="v1" etype="FP32"/>
+ <description>Stores packed single-precision (32-bit) floating-point elements of "v1" into a doubleword stream at a logically mapped starting address (mt-64), storing the high-64-byte elements of that stream (those elements of the stream that map at or after the first 64-byte-aligned address following (mt-64)).</description>
+ <operation>
+storeOffset := 0
+foundNext64BytesBoundary := 0
+addr := mt-64
+FOR j := 0 to 15
+ IF foundNext64BytesBoundary == 0
+ IF ((addr + (storeOffset + 1)*4) % 64) == 0
+ foundNext64BytesBoundary := 1
+ FI
+ ELSE
+ i := j*32
+ MEM[addr + storeOffset*4] := v1[i+31:i]
+ FI
+ storeOffset := storeOffset + 1
+ENDFOR
+ </operation>
+ <instruction name="VPACKSTOREHPS" form="m512, zmm"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_mask_packstorehi_ps">
+ <type>Floating Point</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="mt" etype="FP32" memwidth="512"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="v1" etype="FP32"/>
+ <description>Stores packed single-precision (32-bit) floating-point elements of "v1" into a doubleword stream at a logically mapped starting address (mt-64), storing the high-64-byte elements of that stream (those elements of the stream that map at or after the first 64-byte-aligned address following (mt-64)). Elements are stored to memory according to element selector "k" (elements are skipped when the corresponding mask bit is not set).</description>
+ <operation>
+storeOffset := 0
+foundNext64BytesBoundary := 0
+addr := mt-64
+FOR j := 0 to 15
+ IF k[j]
+ IF foundNext64BytesBoundary == 0
+ IF ((addr + (storeOffset + 1)*4) % 64) == 0
+ foundNext64BytesBoundary := 1
+ FI
+ ELSE
+ i := j*32
+ MEM[addr + storeOffset*4] := v1[i+31:i]
+ FI
+ storeOffset := storeOffset + 1
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VPACKSTOREHPS" form="m512 {k}, zmm"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_packstorelo_ps">
+ <type>Floating Point</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="mt" etype="FP32" memwidth="512"/>
+ <parameter type="__m512" varname="v1" etype="FP32"/>
+ <description>Stores packed single-precision (32-bit) floating-point elements of "v1" into a doubleword stream at a logically mapped starting address "mt", storing the low-64-byte elements of that stream (those elements of the stream that map before the first 64-byte-aligned address following "mt").</description>
+ <operation>
+storeOffset := 0
+addr := mt
+FOR j := 0 to 15
+ i := j*32
+ MEM[addr + storeOffset*4] := v1[i+31:i]
+ storeOffset := storeOffset + 1
+ IF ((addr + storeOffset*4) % 64) == 0
+ BREAK
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VPACKSTORELPS" form="m512, zmm"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_mask_packstorelo_ps">
+ <type>Floating Point</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="mt" etype="FP32" memwidth="512"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="v1" etype="FP32"/>
+ <description>Stores packed single-precision (32-bit) floating-point elements of "v1" into a doubleword stream at a logically mapped starting address "mt", storing the low-64-byte elements of that stream (those elements of the stream that map before the first 64-byte-aligned address following "mt"). Elements are stored to memory according to element selector "k" (elements are skipped when the corresponding mask bit is not set).</description>
+ <operation>
+storeOffset := 0
+addr := mt
+FOR j := 0 to 15
+ IF k[j]
+ i := j*32
+ MEM[addr + storeOffset*4] := v1[i+31:i]
+ storeOffset := storeOffset + 1
+ IF ((addr + storeOffset*4) % 64) == 0
+ BREAK
+ FI
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VPACKSTORELPS" form="m512 {k}, zmm"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_packstorehi_pd">
+ <type>Floating Point</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="mt" etype="FP64" memwidth="512"/>
+ <parameter type="__m512d" varname="v1" etype="FP64"/>
+ <description>Stores packed double-precision (64-bit) floating-point elements of "v1" into a quadword stream at a logically mapped starting address (mt-64), storing the high-64-byte elements of that stream (those elements of the stream that map at or after the first 64-byte-aligned address following (mt-64)).</description>
+ <operation>
+storeOffset := 0
+foundNext64BytesBoundary := 0
+addr := mt-64
+FOR j := 0 to 7
+ IF foundNext64BytesBoundary == 0
+ IF ((addr + (storeOffset + 1)*8) % 64) == 0
+ foundNext64BytesBoundary := 1
+ FI
+ ELSE
+ i := j*64
+      MEM[addr + storeOffset*8] := v1[i+63:i]
+ FI
+ storeOffset := storeOffset + 1
+ENDFOR
+ </operation>
+ <instruction name="VPACKSTOREHPD" form="m512, zmm"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_mask_packstorehi_pd">
+ <type>Floating Point</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="mt" etype="FP64" memwidth="512"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="v1" etype="FP64"/>
+ <description>Stores packed double-precision (64-bit) floating-point elements of "v1" into a quadword stream at a logically mapped starting address (mt-64), storing the high-64-byte elements of that stream (those elements of the stream that map at or after the first 64-byte-aligned address following (mt-64)). Elements are stored to memory according to element selector "k" (elements are skipped when the corresponding mask bit is not set).</description>
+ <operation>
+storeOffset := 0
+foundNext64BytesBoundary := 0
+addr := mt-64
+FOR j := 0 to 7
+ IF k[j]
+ IF foundNext64BytesBoundary == 0
+ IF ((addr + (storeOffset + 1)*8) % 64) == 0
+ foundNext64BytesBoundary := 1
+ FI
+ ELSE
+ i := j*64
+         MEM[addr + storeOffset*8] := v1[i+63:i]
+ FI
+ storeOffset := storeOffset + 1
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VPACKSTOREHPD" form="m512 {k}, zmm"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_packstorelo_pd">
+ <type>Floating Point</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="mt" etype="FP64" memwidth="512"/>
+ <parameter type="__m512d" varname="v1" etype="FP64"/>
+ <description>Stores packed double-precision (64-bit) floating-point elements of "v1" into a quadword stream at a logically mapped starting address "mt", storing the low-64-byte elements of that stream (those elements of the stream that map before the first 64-byte-aligned address following "mt").</description>
+ <operation>
+storeOffset := 0
+addr := mt
+FOR j := 0 to 7
+ i := j*64
+ MEM[addr + storeOffset*8] := v1[i+63:i]
+ storeOffset := storeOffset + 1
+ IF ((addr + storeOffset*8) % 64) == 0
+ BREAK
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VPACKSTORELPD" form="m512, zmm"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_mask_packstorelo_pd">
+ <type>Floating Point</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="mt" etype="FP64" memwidth="512"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="v1" etype="FP64"/>
+ <description>Stores packed double-precision (64-bit) floating-point elements of "v1" into a quadword stream at a logically mapped starting address "mt", storing the low-64-byte elements of that stream (those elements of the stream that map before the first 64-byte-aligned address following "mt"). Elements are stored to memory according to element selector "k" (elements are skipped when the corresponding mask bit is not set).</description>
+ <operation>
+storeOffset := 0
+addr := mt
+FOR j := 0 to 7
+ IF k[j]
+ i := j*64
+ MEM[addr + storeOffset*8] := v1[i+63:i]
+ storeOffset := storeOffset + 1
+ IF ((addr + storeOffset*8) % 64) == 0
+ BREAK
+ FI
+ FI
+ENDFOR
+ </operation>
+ <instruction name="VPACKSTORELPD" form="m512 {k}, zmm"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm_countbits_32">
+ <CPUID>KNCNI</CPUID>
+ <category>Bit Manipulation</category>
+ <return type="unsigned int" varname="dst" etype="UI32"/>
+ <parameter type="unsigned int" varname="r1" etype="UI32"/>
+ <description>Counts the number of set bits in 32-bit unsigned integer "r1", returning the results in "dst".</description>
+ <operation>dst[31:0] := PopCount(r1[31:0])
+ </operation>
+ <instruction name="POPCNT" form="r32, r32" xed="POPCNT_GPRv_GPRv"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm_countbits_64">
+ <CPUID>KNCNI</CPUID>
+ <category>Bit Manipulation</category>
+ <return type="unsigned __int64" varname="dst" etype="UI64"/>
+ <parameter type="unsigned __int64" varname="r1" etype="UI64"/>
+ <description>Counts the number of set bits in 64-bit unsigned integer "r1", returning the results in "dst".</description>
+ <operation>dst[63:0] := PopCount(r1[63:0])
+ </operation>
+ <instruction name="POPCNT" form="r64, r64" xed="POPCNT_GPRv_GPRv"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_kmovlhb">
+ <type>Mask</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Mask</category>
+ <return type="__mmask16" varname="dst" etype="MASK"/>
+ <parameter type="__mmask16" varname="k1" etype="MASK"/>
+ <parameter type="__mmask16" varname="k2" etype="MASK"/>
+ <description>Inserts the low byte of mask "k2" into the high byte of "dst", and copies the low byte of "k1" to the low byte of "dst".</description>
+ <operation>
+dst[7:0] := k1[7:0]
+dst[15:8] := k2[7:0]
+ </operation>
+ <instruction name="KMERGE2L1L" form="k, k"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_cvtfxpnt_roundpd_epi32lo">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Convert</category>
+ <return type="__m512i" varname="dst" etype="SI32"/>
+ <parameter type="__m512d" varname="v2" etype="FP64"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Performs an element-by-element conversion of elements in packed double-precision (64-bit) floating-point vector "v2" to 32-bit integer elements, storing them in the lower half of "dst". The elements in the upper half of "dst" are set to 0.
+ [round_note]</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ k := j*32
+ dst[k+31:k] := Convert_FP64_To_Int32(v2[i+63:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCVTFXPNTPD2DQ" form="zmm, m512, imm8"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_mask_cvtfxpnt_roundpd_epi32lo">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Convert</category>
+ <return type="__m512i" varname="dst" etype="SI32"/>
+ <parameter type="__m512i" varname="src" etype="SI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="v2" etype="FP64"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Performs an element-by-element conversion of elements in packed double-precision (64-bit) floating-point vector "v2" to 32-bit integer elements, storing them in the lower half of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). The elements in the upper half of "dst" are set to 0.
+ [round_note]</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ l := j*32
+ IF k[j]
+ dst[l+31:l] := Convert_FP64_To_Int32(v2[i+63:i])
+ ELSE
+ dst[l+31:l] := src[l+31:l]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCVTFXPNTPD2DQ" form="zmm {k}, m512, imm8"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_cvtfxpnt_round_adjustepi32_ps">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Convert</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512i" varname="v2" etype="UI32"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <parameter type="_MM_EXP_ADJ_ENUM" varname="expadj" etype="UI32"/>
+ <description>Performs element-by-element conversion of packed 32-bit integer elements in "v2" to packed single-precision (32-bit) floating-point elements and performing an optional exponent adjust using "expadj", storing the results in "dst".
+ [round_note]</description>
+ <operation>FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := Int32ToFloat32(v2[i+31:i])
+ CASE expadj OF
+ _MM_EXPADJ_NONE: dst[i+31:i] := dst[i+31:i] * (2 &lt;&lt; 0)
+ _MM_EXPADJ_4: dst[i+31:i] := dst[i+31:i] * (2 &lt;&lt; 4)
+ _MM_EXPADJ_5: dst[i+31:i] := dst[i+31:i] * (2 &lt;&lt; 5)
+ _MM_EXPADJ_8: dst[i+31:i] := dst[i+31:i] * (2 &lt;&lt; 8)
+ _MM_EXPADJ_16: dst[i+31:i] := dst[i+31:i] * (2 &lt;&lt; 16)
+ _MM_EXPADJ_24: dst[i+31:i] := dst[i+31:i] * (2 &lt;&lt; 24)
+ _MM_EXPADJ_31: dst[i+31:i] := dst[i+31:i] * (2 &lt;&lt; 31)
+ _MM_EXPADJ_32: dst[i+31:i] := dst[i+31:i] * (2 &lt;&lt; 32)
+ ESAC
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VCVTFXPNTDQ2PS" form="zmm, m512, imm8"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_log2ae23_ps">
+ <type>Floating Point</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Compute the base-2 logarithm of packed single-precision (32-bit) floating-point elements in "a" with absolute error of 2^(-23) and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := LOG(a[i+31:i]) / LOG(2.0)
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VLOG2PS" form="zmm, m512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_mask_log2ae23_ps">
+ <type>Floating Point</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Compute the base-2 logarithm of packed single-precision (32-bit) floating-point elements in "a" with absolute error of 2^(-23) and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := LOG(a[i+31:i]) / LOG(2.0)
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VLOG2PS" form="zmm {k}, m512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_fmadd_epi32">
+ <type>Integer</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="__m512i" varname="b" etype="UI32"/>
+ <parameter type="__m512i" varname="c" etype="UI32"/>
+ <description>Multiply packed 32-bit integer elements in "a" and "b", add the intermediate result to packed elements in "c" and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i]
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMADD231D" form="zmm, zmm, zmm"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_mask_fmadd_epi32">
+ <type>Integer</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="b" etype="UI32"/>
+ <parameter type="__m512i" varname="c" etype="UI32"/>
+ <description>Multiply packed 32-bit integer elements in "a" and "b", add the intermediate result to packed elements in "c" and store the results in "dst" using writemask "k" (elements are copied from "a" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i]
+ ELSE
+ dst[i+31:i] := a[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMADD231D" form="zmm {k}, zmm, zmm"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_mask3_fmadd_epi32">
+ <type>Integer</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="__m512i" varname="b" etype="UI32"/>
+ <parameter type="__m512i" varname="c" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <description>Multiply packed 32-bit integer elements in "a" and "b", add the intermediate result to packed elements in "c" and store the results in "dst" using writemask "k" (elements are copied from "c" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := (a[i+31:i] * b[i+31:i]) + c[i+31:i]
+ ELSE
+ dst[i+31:i] := c[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMADD231D" form="zmm {k}, zmm, zmm"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_fmadd233_epi32">
+ <type>Integer</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="__m512i" varname="b" etype="UI32"/>
+ <description>Multiply packed 32-bit integer elements in each 4-element set of "a" and by element 1 of the corresponding 4-element set from "b", add the intermediate result to element 0 of the corresponding 4-element set from "b", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ base := (j &amp; ~0x3) * 32
+ scale[31:0] := b[base+63:base+32]
+ bias[31:0] := b[base+31:base]
+ dst[i+31:i] := (a[i+31:i] * scale[31:0]) + bias[31:0]
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMADD233D" form="zmm, zmm, zmm"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_mask_fmadd233_epi32">
+ <type>Integer</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="src" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="__m512i" varname="b" etype="UI32"/>
+ <description>Multiply packed 32-bit integer elements in each 4-element set of "a" and by element 1 of the corresponding 4-element set from "b", add the intermediate result to element 0 of the corresponding 4-element set from "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ base := (j &amp; ~0x3) * 32
+ scale[31:0] := b[base+63:base+32]
+ bias[31:0] := b[base+31:base]
+ dst[i+31:i] := (a[i+31:i] * scale[31:0]) + bias[31:0]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMADD233D" form="zmm {k}, zmm, zmm"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_fmadd233_round_ps">
+ <type>Floating Point</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Multiply packed single-precision (32-bit) floating-point elements in each 4-element set of "a" and by element 1 of the corresponding 4-element set from "b", add the intermediate result to element 0 of the corresponding 4-element set from "b", and store the results in "dst".
+ [round_note]</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ base := (j &amp; ~0x3) * 32
+ scale[31:0] := b[base+63:base+32]
+ bias[31:0] := b[base+31:base]
+ dst[i+31:i] := (a[i+31:i] * scale[31:0]) + bias[31:0]
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VFMADD233PS" form="zmm, zmm, zmm"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_mask_fmadd233_round_ps">
+ <type>Floating Point</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Multiply packed single-precision (32-bit) floating-point elements in each 4-element set of "a" and by element 1 of the corresponding 4-element set from "b", add the intermediate result to element 0 of the corresponding 4-element set from "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+ [round_note]</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ base := (j &amp; ~0x3) * 32
+ scale[31:0] := b[base+63:base+32]
+ bias[31:0] := b[base+31:base]
+ dst[i+31:i] := (a[i+31:i] * scale[31:0]) + bias[31:0]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VFMADD233PS" form="zmm {k}, zmm, zmm"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_maxabs_ps">
+ <type>Floating Point</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <description>Determines the maximum of the absolute elements of each pair of corresponding elements of packed single-precision (32-bit) floating-point elements in "a" and "b", storing the results in "dst".</description>
+ <operation>FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := FpMax(ABS(a[i+31:i]), ABS(b[i+31:i]))
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VGMAXABSPS" form="zmm, zmm, m512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_mask_maxabs_ps">
+ <type>Floating Point</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <description>Determines the maximum of the absolute elements of each pair of corresponding elements of packed single-precision (32-bit) floating-point elements in "a" and "b", storing the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := FpMax(ABS(a[i+31:i]), ABS(b[i+31:i]))
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VGMAXABSPS" form="zmm {k}, zmm, m512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_gmax_ps">
+ <type>Floating Point</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <description>Determines the maximum of each pair of corresponding elements in packed single-precision (32-bit) floating-point elements in "a" and "b", storing the results in "dst".</description>
+ <operation>FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := FpMax(a[i+31:i], b[i+31:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VGMAXPS" form="zmm, zmm, m512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_mask_gmax_ps">
+ <type>Floating Point</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <description>Determines the maximum of each pair of corresponding elements of packed single-precision (32-bit) floating-point elements in "a" and "b", storing the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := FpMax(a[i+31:i], b[i+31:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VGMAXPS" form="zmm {k}, zmm, m512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_gmaxabs_ps">
+ <type>Floating Point</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <description>Determines the maximum of the absolute elements of each pair of corresponding elements of packed single-precision (32-bit) floating-point elements in "a" and "b", storing the results in "dst".</description>
+ <operation>FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := FpMax(ABS(a[i+31:i]), ABS(b[i+31:i]))
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VGMAXABSPS" form="zmm, zmm, m512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_mask_gmaxabs_ps">
+ <type>Floating Point</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <description>Determines the maximum of the absolute elements of each pair of corresponding elements of packed single-precision (32-bit) floating-point elements in "a" and "b", storing the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := FpMax(ABS(a[i+31:i]), ABS(b[i+31:i]))
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VGMAXABSPS" form="zmm {k}, zmm, m512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_gmax_pd">
+ <type>Floating Point</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <description>Determines the maximum of each pair of corresponding elements in packed double-precision (64-bit) floating-point elements in "a" and "b", storing the results in "dst".</description>
+ <operation>FOR j := 0 to 7
+ i := j*64
+ dst[i+63:i] := FpMax(a[i+63:i], b[i+63:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VGMAXPD" form="zmm, zmm, m512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_mask_gmax_pd">
+ <type>Floating Point</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <description>Determines the maximum of each pair of corresponding elements of packed double-precision (64-bit) floating-point elements in "a" and "b", storing the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := FpMax(a[i+63:i], b[i+63:i])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VGMAXPD" form="zmm {k}, zmm, m512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_gmin_ps">
+ <type>Floating Point</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <description>Determines the minimum of each pair of corresponding elements in packed single-precision (32-bit) floating-point elements in "a" and "b", storing the results in "dst".</description>
+ <operation>FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := FpMin(a[i+31:i], b[i+31:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VGMINPS" form="zmm, zmm, m512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_mask_gmin_ps">
+ <type>Floating Point</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+	<description>Determines the minimum of each pair of corresponding elements of packed single-precision (32-bit) floating-point elements in "a" and "b", storing the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := FpMin(a[i+31:i], b[i+31:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VGMINPS" form="zmm {k}, zmm, m512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_gmin_pd">
+ <type>Floating Point</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+ <description>Determines the minimum of each pair of corresponding elements in packed double-precision (64-bit) floating-point elements in "a" and "b", storing the results in "dst".</description>
+ <operation>FOR j := 0 to 7
+ i := j*64
+ dst[i+63:i] := FpMin(a[i+63:i], b[i+63:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VGMINPD" form="zmm, zmm, m512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_mask_gmin_pd">
+ <type>Floating Point</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="__m512d" varname="b" etype="FP64"/>
+	<description>Determines the minimum of each pair of corresponding elements of packed double-precision (64-bit) floating-point elements in "a" and "b", storing the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := FpMin(a[i+63:i], b[i+63:i])
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VGMINPD" form="zmm {k}, zmm, m512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_mulhi_epi32">
+ <type>Integer</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="__m512i" varname="b" etype="UI32"/>
+ <description>Performs element-by-element multiplication between packed 32-bit integer elements in "a" and "b" and stores the high 32 bits of each result into "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := (a[i+31:i] * b[i+31:i]) &gt;&gt; 32
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMULHD" form="zmm, zmm, m512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_mask_mulhi_epi32">
+ <type>Integer</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="src" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="__m512i" varname="b" etype="UI32"/>
+ <description>Performs element-by-element multiplication between packed 32-bit integer elements in "a" and "b" and stores the high 32 bits of each result into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := (a[i+31:i] * b[i+31:i]) &gt;&gt; 32
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMULHD" form="zmm {k}, zmm, m512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_mulhi_epu32">
+ <type>Integer</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="__m512i" varname="b" etype="UI32"/>
+ <description>Performs element-by-element multiplication between packed unsigned 32-bit integer elements in "a" and "b" and stores the high 32 bits of each result into "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := (a[i+31:i] * b[i+31:i]) &gt;&gt; 32
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMULHUD" form="zmm, zmm, m512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_mask_mulhi_epu32">
+ <type>Integer</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="src" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="__m512i" varname="b" etype="UI32"/>
+ <description>Performs element-by-element multiplication between packed unsigned 32-bit integer elements in "a" and "b" and stores the high 32 bits of each result into "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := (a[i+31:i] * b[i+31:i]) &gt;&gt; 32
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPMULHUD" form="zmm {k}, zmm, m512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_permute4f128_epi32">
+ <type>Integer</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="_MM_PERM_ENUM" varname="imm8" etype="IMM" immtype="_MM_PERM"/>
+ <description>Permutes 128-bit blocks of the packed 32-bit integer vector "a" using constant "imm8". The results are stored in "dst".</description>
+ <operation>
+DEFINE SELECT4(src, control) {
+ CASE control[1:0] OF
+ 0: tmp[127:0] := src[127:0]
+ 1: tmp[127:0] := src[255:128]
+ 2: tmp[127:0] := src[383:256]
+ 3: tmp[127:0] := src[511:384]
+ ESAC
+ RETURN tmp[127:0]
+}
+FOR j := 0 to 3
+ i := j*128
+ n := j*2
+ dst[i+127:i] := SELECT4(a[511:0], imm8[n+1:n])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPERMF32X4" form="zmm, m512, imm8"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_mask_permute4f128_epi32">
+ <type>Integer</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="src" etype="UI32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="_MM_PERM_ENUM" varname="imm8" etype="IMM" immtype="_MM_PERM"/>
+ <description>Permutes 128-bit blocks of the packed 32-bit integer vector "a" using constant "imm8". The results are stored in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE SELECT4(src, control) {
+ CASE control[1:0] OF
+ 0: tmp[127:0] := src[127:0]
+ 1: tmp[127:0] := src[255:128]
+ 2: tmp[127:0] := src[383:256]
+ 3: tmp[127:0] := src[511:384]
+ ESAC
+ RETURN tmp[127:0]
+}
+tmp[511:0] := 0
+FOR j := 0 to 3
+ i := j*128
+ n := j*2
+ tmp[i+127:i] := SELECT4(a[511:0], imm8[n+1:n])
+ENDFOR
+FOR j := 0 to 15
+	i := j*32
+	IF k[j]
+		dst[i+31:i] := tmp[i+31:i]
+	ELSE
+		dst[i+31:i] := src[i+31:i]
+	FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPERMF32X4" form="zmm {k}, m512, imm8"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_rcp23_ps">
+ <type>Floating Point</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Approximates the reciprocals of packed single-precision (32-bit) floating-point elements in "a" to 23 bits of precision, storing the results in "dst".</description>
+ <operation>FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := (1.0 / a[i+31:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VRCP23PS" form="zmm, m512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_mask_rcp23_ps">
+ <type>Floating Point</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Approximates the reciprocals of packed single-precision (32-bit) floating-point elements in "a" to 23 bits of precision, storing the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := (1.0 / a[i+31:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VRCP23PS" form="zmm {k}, m512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_round_ps">
+ <type>Floating Point</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Convert</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <parameter type="_MM_EXP_ADJ_ENUM" varname="expadj" etype="UI32"/>
+ <description>Round the packed single-precision (32-bit) floating-point elements in "a" to the nearest integer value using "expadj" and in the direction of "rounding", and store the results as packed single-precision floating-point elements in "dst".
+ [round_note]</description>
+ <operation>FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := ROUND(a[i+31:i])
+ CASE expadj OF
+ _MM_EXPADJ_NONE: dst[i+31:i] := dst[i+31:i] * (2 &lt;&lt; 0)
+ _MM_EXPADJ_4: dst[i+31:i] := dst[i+31:i] * (2 &lt;&lt; 4)
+ _MM_EXPADJ_5: dst[i+31:i] := dst[i+31:i] * (2 &lt;&lt; 5)
+ _MM_EXPADJ_8: dst[i+31:i] := dst[i+31:i] * (2 &lt;&lt; 8)
+ _MM_EXPADJ_16: dst[i+31:i] := dst[i+31:i] * (2 &lt;&lt; 16)
+ _MM_EXPADJ_24: dst[i+31:i] := dst[i+31:i] * (2 &lt;&lt; 24)
+ _MM_EXPADJ_31: dst[i+31:i] := dst[i+31:i] * (2 &lt;&lt; 31)
+ _MM_EXPADJ_32: dst[i+31:i] := dst[i+31:i] * (2 &lt;&lt; 32)
+ ESAC
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VROUNDPS" form="zmm, m512, imm8"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_mask_round_ps">
+ <type>Floating Point</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Convert</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <parameter type="_MM_EXP_ADJ_ENUM" varname="expadj" etype="UI32"/>
+ <description>Round the packed single-precision (32-bit) floating-point elements in "a" to the nearest integer value using "expadj" and in the direction of "rounding", and store the results as packed single-precision floating-point elements in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+ [round_note]</description>
+ <operation>FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := ROUND(a[i+31:i])
+ CASE expadj OF
+ _MM_EXPADJ_NONE: dst[i+31:i] := dst[i+31:i] * (2 &lt;&lt; 0)
+ _MM_EXPADJ_4: dst[i+31:i] := dst[i+31:i] * (2 &lt;&lt; 4)
+ _MM_EXPADJ_5: dst[i+31:i] := dst[i+31:i] * (2 &lt;&lt; 5)
+ _MM_EXPADJ_8: dst[i+31:i] := dst[i+31:i] * (2 &lt;&lt; 8)
+ _MM_EXPADJ_16: dst[i+31:i] := dst[i+31:i] * (2 &lt;&lt; 16)
+ _MM_EXPADJ_24: dst[i+31:i] := dst[i+31:i] * (2 &lt;&lt; 24)
+ _MM_EXPADJ_31: dst[i+31:i] := dst[i+31:i] * (2 &lt;&lt; 31)
+ _MM_EXPADJ_32: dst[i+31:i] := dst[i+31:i] * (2 &lt;&lt; 32)
+ ESAC
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VROUNDPS" form="zmm {k}, m512, imm8"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_roundfxpnt_adjust_ps">
+ <type>Floating Point</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <parameter type="_MM_EXP_ADJ_ENUM" varname="expadj" etype="UI32"/>
+ <description>Performs element-by-element rounding of packed single-precision (32-bit) floating-point elements in "a" using "expadj" and in the direction of "rounding" and stores results in "dst".
+ [round_note]</description>
+ <operation>FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := ROUND(a[i+31:i])
+ CASE expadj OF
+ _MM_EXPADJ_NONE: dst[i+31:i] := dst[i+31:i] * (2 &lt;&lt; 0)
+ _MM_EXPADJ_4: dst[i+31:i] := dst[i+31:i] * (2 &lt;&lt; 4)
+ _MM_EXPADJ_5: dst[i+31:i] := dst[i+31:i] * (2 &lt;&lt; 5)
+ _MM_EXPADJ_8: dst[i+31:i] := dst[i+31:i] * (2 &lt;&lt; 8)
+ _MM_EXPADJ_16: dst[i+31:i] := dst[i+31:i] * (2 &lt;&lt; 16)
+ _MM_EXPADJ_24: dst[i+31:i] := dst[i+31:i] * (2 &lt;&lt; 24)
+ _MM_EXPADJ_31: dst[i+31:i] := dst[i+31:i] * (2 &lt;&lt; 31)
+ _MM_EXPADJ_32: dst[i+31:i] := dst[i+31:i] * (2 &lt;&lt; 32)
+ ESAC
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VRNDFXPNTPS" form="zmm, m512, imm8"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_mask_roundfxpnt_adjust_ps">
+ <type>Floating Point</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <parameter type="_MM_EXP_ADJ_ENUM" varname="expadj" etype="UI32"/>
+ <description>Performs element-by-element rounding of packed single-precision (32-bit) floating-point elements in "a" using "expadj" and in the direction of "rounding" and stores results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+ [round_note]</description>
+ <operation>FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := ROUND(a[i+31:i])
+ CASE expadj OF
+ _MM_EXPADJ_NONE: dst[i+31:i] := dst[i+31:i] * (2 &lt;&lt; 0)
+ _MM_EXPADJ_4: dst[i+31:i] := dst[i+31:i] * (2 &lt;&lt; 4)
+ _MM_EXPADJ_5: dst[i+31:i] := dst[i+31:i] * (2 &lt;&lt; 5)
+ _MM_EXPADJ_8: dst[i+31:i] := dst[i+31:i] * (2 &lt;&lt; 8)
+ _MM_EXPADJ_16: dst[i+31:i] := dst[i+31:i] * (2 &lt;&lt; 16)
+ _MM_EXPADJ_24: dst[i+31:i] := dst[i+31:i] * (2 &lt;&lt; 24)
+ _MM_EXPADJ_31: dst[i+31:i] := dst[i+31:i] * (2 &lt;&lt; 31)
+ _MM_EXPADJ_32: dst[i+31:i] := dst[i+31:i] * (2 &lt;&lt; 32)
+ ESAC
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VRNDFXPNTPS" form="zmm {k}, m512, imm8"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_roundfxpnt_adjust_pd">
+ <type>Floating Point</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <parameter type="_MM_EXP_ADJ_ENUM" varname="expadj" etype="UI64"/>
+ <description>Performs element-by-element rounding of packed double-precision (64-bit) floating-point elements in "a" using "expadj" and in the direction of "rounding" and stores results in "dst".
+ [round_note]</description>
+ <operation>FOR j := 0 to 7
+ i := j*64
+ dst[i+63:i] := ROUND(a[i+63:i])
+ CASE expadj OF
+	_MM_EXPADJ_NONE: dst[i+63:i] := dst[i+63:i] * (2 &lt;&lt; 0)
+	_MM_EXPADJ_4:    dst[i+63:i] := dst[i+63:i] * (2 &lt;&lt; 4)
+	_MM_EXPADJ_5:    dst[i+63:i] := dst[i+63:i] * (2 &lt;&lt; 5)
+	_MM_EXPADJ_8:    dst[i+63:i] := dst[i+63:i] * (2 &lt;&lt; 8)
+	_MM_EXPADJ_16:   dst[i+63:i] := dst[i+63:i] * (2 &lt;&lt; 16)
+	_MM_EXPADJ_24:   dst[i+63:i] := dst[i+63:i] * (2 &lt;&lt; 24)
+	_MM_EXPADJ_31:   dst[i+63:i] := dst[i+63:i] * (2 &lt;&lt; 31)
+	_MM_EXPADJ_32:   dst[i+63:i] := dst[i+63:i] * (2 &lt;&lt; 32)
+ ESAC
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VRNDFXPNTPD" form="zmm, m512, imm8"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_mask_roundfxpnt_adjust_pd">
+ <type>Floating Point</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <parameter type="_MM_EXP_ADJ_ENUM" varname="expadj" etype="UI64"/>
+ <description>Performs element-by-element rounding of packed double-precision (64-bit) floating-point elements in "a" using "expadj" and in the direction of "rounding" and stores results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).
+ [round_note]</description>
+ <operation>FOR j := 0 to 7
+ i := j*64
+ IF k[j]
+ dst[i+63:i] := ROUND(a[i+63:i])
+ CASE expadj OF
+		_MM_EXPADJ_NONE: dst[i+63:i] := dst[i+63:i] * (2 &lt;&lt; 0)
+		_MM_EXPADJ_4:    dst[i+63:i] := dst[i+63:i] * (2 &lt;&lt; 4)
+		_MM_EXPADJ_5:    dst[i+63:i] := dst[i+63:i] * (2 &lt;&lt; 5)
+		_MM_EXPADJ_8:    dst[i+63:i] := dst[i+63:i] * (2 &lt;&lt; 8)
+		_MM_EXPADJ_16:   dst[i+63:i] := dst[i+63:i] * (2 &lt;&lt; 16)
+		_MM_EXPADJ_24:   dst[i+63:i] := dst[i+63:i] * (2 &lt;&lt; 24)
+		_MM_EXPADJ_31:   dst[i+63:i] := dst[i+63:i] * (2 &lt;&lt; 31)
+		_MM_EXPADJ_32:   dst[i+63:i] := dst[i+63:i] * (2 &lt;&lt; 32)
+ ESAC
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VRNDFXPNTPD" form="zmm {k}, m512, imm8"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_rsqrt23_ps">
+ <type>Floating Point</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Calculates the reciprocal square root of packed single-precision (32-bit) floating-point elements in "a" to 23 bits of accuracy and stores the result in "dst".</description>
+ <operation>FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := Sqrt(1.0 / a[i+31:i])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VRSQRT23PS" form="zmm, m512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_mask_rsqrt23_ps">
+ <type>Floating Point</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Calculates the reciprocal square root of packed single-precision (32-bit) floating-point elements in "a" to 23 bits of accuracy and stores the result in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := Sqrt(1.0 / a[i+31:i])
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VRSQRT23PS" form="zmm {k}, m512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_scale_ps">
+ <type>Floating Point</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512i" varname="b" etype="UI32"/>
+ <description>Scales each single-precision (32-bit) floating-point element in "a" by multiplying it by 2**exponent, where the exponent is the corresponding 32-bit integer element in "b", storing results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := a[i+31:i] * POW(2.0, FP32(b[i+31:i]))
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VSCALEPS" form="zmm, zmm, m512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_mask_scale_ps">
+ <type>Floating Point</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512i" varname="b" etype="UI32"/>
+ <description>Scales each single-precision (32-bit) floating-point element in "a" by multiplying it by 2**exponent, where the exponent is the corresponding 32-bit integer element in "b", storing results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := a[i+31:i] * POW(2.0, FP32(b[i+31:i]))
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VSCALEPS" form="zmm {k}, zmm, m512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" sequence="TRUE" name="_mm512_scale_round_ps">
+ <type>Floating Point</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512i" varname="b" etype="UI32"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Scales each single-precision (32-bit) floating-point element in "a" by multiplying it by 2**exponent, where the exponent is the corresponding 32-bit integer element in "b", storing results in "dst". Intermediate elements are rounded using "rounding".
+ [round_note]</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ dst[i+31:i] := a[i+31:i] * POW(2.0,FP32(b[i+31:i]))
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VSCALEPS" form="zmm, zmm, m512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_mask_scale_round_ps">
+ <type>Floating Point</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512i" varname="b" etype="UI32"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Scales each single-precision (32-bit) floating-point element in "a" by multiplying it by 2**exp, where the exp is the corresponding 32-bit integer element in "b", storing results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). Results are rounded using constant "rounding".
+ [round_note]</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := a[i+31:i] * POW(2.0, FP32(b[i+31:i]))
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VSCALEPS" form="zmm {k}, zmm, m512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" sequence="TRUE" name="_mm512_reduce_gmin_ps">
+ <type>Floating Point</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Special Math Functions</category>
+ <return type="float" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Determines the minimum element of the packed single-precision (32-bit) floating-point elements stored in "a" and stores the result in "dst".</description>
+ <operation>min := a[31:0]
+FOR j := 1 to 15
+ i := j*32
+ min := FpMin(min, a[i+31:i])
+ENDFOR
+dst := min
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" sequence="TRUE" name="_mm512_mask_reduce_gmin_ps">
+ <type>Floating Point</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Special Math Functions</category>
+ <return type="float" varname="dst" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Determines the minimum element of the packed single-precision (32-bit) floating-point elements stored in "a" and stores the result in "dst" using writemask "k" (elements are ignored when the corresponding mask bit is not set).</description>
+ <operation>min := a[31:0]
+FOR j := 1 to 15
+ i := j*32
+ IF k[j]
+  min := FpMin(min, a[i+31:i])
+ FI
+ENDFOR
+dst := min
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" sequence="TRUE" name="_mm512_reduce_gmin_pd">
+ <type>Floating Point</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Special Math Functions</category>
+ <return type="double" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Determines the minimum element of the packed double-precision (64-bit) floating-point elements stored in "a" and stores the result in "dst".</description>
+ <operation>min := a[63:0]
+FOR j := 1 to 7
+ i := j*64
+ min := FpMin(min, a[i+63:i])
+ENDFOR
+dst := min
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" sequence="TRUE" name="_mm512_mask_reduce_gmin_pd">
+ <type>Floating Point</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Special Math Functions</category>
+ <return type="double" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Determines the minimum element of the packed double-precision (64-bit) floating-point elements stored in "a" and stores the result in "dst". Bitmask "k" is used to exclude certain elements (elements are ignored when the corresponding mask bit is not set).</description>
+ <operation>min := a[63:0]
+FOR j := 1 to 7
+ i := j*64
+ IF k[j]
+  min := FpMin(min, a[i+63:i])
+ FI
+ENDFOR
+dst := min
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" sequence="TRUE" name="_mm512_reduce_gmax_ps">
+ <type>Floating Point</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Special Math Functions</category>
+ <return type="float" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Determines the maximum element of the packed single-precision (32-bit) floating-point elements stored in "a" and stores the result in "dst".</description>
+ <operation>max := a[31:0]
+FOR j := 1 to 15
+ i := j*32
+ max := FpMax(max, a[i+31:i])
+ENDFOR
+dst := max
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" sequence="TRUE" name="_mm512_mask_reduce_gmax_ps">
+ <type>Floating Point</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Special Math Functions</category>
+ <return type="float" varname="dst" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <description>Determines the maximum element of the packed single-precision (32-bit) floating-point elements stored in "a" and stores the result in "dst". Bitmask "k" is used to exclude certain elements (elements are ignored when the corresponding mask bit is not set).</description>
+ <operation>max := a[31:0]
+FOR j := 1 to 15
+ i := j*32
+ IF k[j]
+  max := FpMax(max, a[i+31:i])
+ FI
+ENDFOR
+dst := max
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" sequence="TRUE" name="_mm512_reduce_gmax_pd">
+ <type>Floating Point</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Special Math Functions</category>
+ <return type="double" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Determines the maximum element of the packed double-precision (64-bit) floating-point elements stored in "a" and stores the result in "dst".</description>
+ <operation>max := a[63:0]
+FOR j := 1 to 7
+ i := j*64
+ max := FpMax(max, a[i+63:i])
+ENDFOR
+dst := max
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" sequence="TRUE" name="_mm512_mask_reduce_gmax_pd">
+ <type>Floating Point</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Special Math Functions</category>
+ <return type="double" varname="dst" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <description>Determines the maximum element of the packed double-precision (64-bit) floating-point elements stored in "a" and stores the result in "dst". Bitmask "k" is used to exclude certain elements (elements are ignored when the corresponding mask bit is not set).</description>
+ <operation>max := a[63:0]
+FOR j := 1 to 7
+ i := j*64
+ IF k[j]
+  max := FpMax(max, a[i+63:i])
+ FI
+ENDFOR
+dst := max
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm_tzcnti_32">
+ <CPUID>KNCNI</CPUID>
+ <category>Bit Manipulation</category>
+ <return type="int" varname="dst" etype="UI32"/>
+ <parameter type="int" varname="a" etype="SI32"/>
+ <parameter type="unsigned int" varname="x" etype="UI32"/>
+ <description>Count the number of trailing zero bits in unsigned 32-bit integer "x" starting at bit "a", and return that count in "dst".</description>
+ <operation>
+tmp := a
+IF tmp &lt; 0
+ tmp := 0
+FI
+dst := 0
+IF tmp &gt; 31
+ dst := 32
+ELSE
+ DO WHILE ((tmp &lt; 32) AND x[tmp] == 0)
+ tmp := tmp + 1
+ dst := dst + 1
+ OD
+FI
+ </operation>
+ <instruction name="TZCNTI" form="r32, r32"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm_tzcnti_64">
+ <CPUID>KNCNI</CPUID>
+ <category>Bit Manipulation</category>
+ <return type="__int64" varname="dst" etype="UI64"/>
+ <parameter type="__int64" varname="a" etype="SI64"/>
+ <parameter type="unsigned __int64" varname="x" etype="UI64"/>
+ <description>Count the number of trailing zero bits in unsigned 64-bit integer "x" starting at bit "a", and return that count in "dst".</description>
+ <operation>
+tmp := a
+IF tmp &lt; 0
+ tmp := 0
+FI
+dst := 0
+IF tmp &gt; 63
+ dst := 64
+ELSE
+ DO WHILE ((tmp &lt; 64) AND x[tmp] == 0)
+ tmp := tmp + 1
+ dst := dst + 1
+ OD
+FI
+ </operation>
+ <instruction name="TZCNTI" form="r64, r64"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm_delay_32">
+ <CPUID>KNCNI</CPUID>
+ <category>General Support</category>
+ <return type="void"/>
+ <parameter type="unsigned int" varname="r1" etype="UI32"/>
+ <description>Stalls a thread without blocking other threads for 32-bit unsigned integer "r1" clock cycles.</description>
+ <operation>BlockThread(r1)
+ </operation>
+ <instruction name="DELAY" form="r32"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm_delay_64">
+ <CPUID>KNCNI</CPUID>
+ <category>General Support</category>
+ <return type="void"/>
+ <parameter type="unsigned __int64" varname="r1" etype="UI64"/>
+ <description>Stalls a thread without blocking other threads for 64-bit unsigned integer "r1" clock cycles.</description>
+ <operation>BlockThread(r1)
+ </operation>
+ <instruction name="DELAY" form="r64"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm_spflt_32">
+ <CPUID>KNCNI</CPUID>
+ <category>General Support</category>
+ <return type="void"/>
+ <parameter type="unsigned int" varname="r1" etype="UI32"/>
+ <description>Set performance monitoring filtering mask to 32-bit unsigned integer "r1".</description>
+ <operation>SetPerfMonMask(r1[31:0])
+ </operation>
+ <instruction name="SPFLT" form="r32"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm_spflt_64">
+ <CPUID>KNCNI</CPUID>
+ <category>General Support</category>
+ <return type="void"/>
+ <parameter type="unsigned __int64" varname="r1" etype="UI64"/>
+ <description>Set performance monitoring filtering mask to 64-bit unsigned integer "r1".</description>
+ <operation>SetPerfMonMask(r1[63:0])
+ </operation>
+ <instruction name="SPFLT" form="r64"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm_clevict">
+ <CPUID>KNCNI</CPUID>
+ <category>General Support</category>
+ <return type="void"/>
+ <parameter type="const void *" varname="ptr"/>
+ <parameter type="int" varname="level" etype="UI32"/>
+ <description>Evicts the cache line containing the address "ptr" from cache level "level" (can be either 0 or 1).</description>
+ <operation>CacheLineEvict(ptr, level)
+ </operation>
+ <instruction name="CLEVICT0" form="m8"/>
+ <instruction name="CLEVICT1" form="m8"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_kandnr">
+ <type>Mask</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Mask</category>
+ <return type="__mmask16" varname="dst" etype="MASK"/>
+ <parameter type="__mmask16" varname="k1" etype="MASK"/>
+ <parameter type="__mmask16" varname="k2" etype="MASK"/>
+ <description>Performs a bitwise AND operation between NOT of "k2" and "k1", storing the result in "dst".</description>
+ <operation>dst[15:0] := NOT(k2[15:0]) &amp; k1[15:0]
+ </operation>
+ <instruction name="KANDNR" form="k, k"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_kswapb">
+ <type>Mask</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Mask</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__mmask16" varname="k1" etype="MASK"/>
+ <parameter type="__mmask16" varname="k2" etype="MASK"/>
+ <description>Moves high byte from "k2" to low byte of "k1", and moves low byte of "k2" to high byte of "k1".</description>
+ <operation>
+tmp[7:0] := k2[15:8]
+k2[15:8] := k1[7:0]
+k1[7:0] := tmp[7:0]
+tmp[7:0] := k2[7:0]
+k2[7:0] := k1[15:8]
+k1[15:8] := tmp[7:0]
+ </operation>
+ <instruction name="KMERGE2L1H" form="k, k"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_kortestz">
+ <type>Mask</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Mask</category>
+ <return type="int" varname="dst" etype="UI32"/>
+ <parameter type="__mmask16" varname="k1" etype="MASK"/>
+ <parameter type="__mmask16" varname="k2" etype="MASK"/>
+ <description>Performs bitwise OR between "k1" and "k2", storing the result in "dst". ZF flag is set if "dst" is 0.</description>
+ <operation>dst[15:0] := k1[15:0] | k2[15:0]
+IF dst == 0
+ SetZF()
+FI
+ </operation>
+ <instruction name="KORTEST" form="k, k"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_kortestc">
+ <type>Mask</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Mask</category>
+ <return type="int" varname="dst" etype="UI32"/>
+ <parameter type="__mmask16" varname="k1" etype="MASK"/>
+ <parameter type="__mmask16" varname="k2" etype="MASK"/>
+ <description>Performs bitwise OR between "k1" and "k2", storing the result in "dst". CF flag is set if "dst" consists of all 1's.</description>
+ <operation>dst[15:0] := k1[15:0] | k2[15:0]
+IF PopCount(dst[15:0]) == 16
+ SetCF()
+FI
+ </operation>
+ <instruction name="KORTEST" form="k, k"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_mask2int">
+ <CPUID>KNCNI</CPUID>
+ <category>Mask</category>
+ <return type="int" varname="dst" etype="UI32"/>
+ <parameter type="__mmask16" varname="k1" etype="MASK"/>
+ <description>Converts bit mask "k1" into an integer value, storing the results in "dst".</description>
+ <operation>
+dst := ZeroExtend32(k1)
+ </operation>
+ <instruction name="KMOV" form="r32, k"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_int2mask">
+ <CPUID>KNCNI</CPUID>
+ <category>Mask</category>
+ <return type="__mmask16" varname="dst" etype="MASK"/>
+ <parameter type="int" varname="mask" etype="UI16"/>
+ <description>Converts integer "mask" into bitmask, storing the result in "dst".</description>
+ <operation>
+dst := mask[15:0]
+ </operation>
+ <instruction name="KMOV" form="k, r32"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_kconcathi_64">
+ <type>Mask</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Mask</category>
+ <return type="__int64" varname="dst" etype="UI64"/>
+ <parameter type="__mmask16" varname="k1" etype="MASK"/>
+ <parameter type="__mmask16" varname="k2" etype="MASK"/>
+ <description>Packs masks "k1" and "k2" into the high 32 bits of "dst". The rest of "dst" is set to 0.</description>
+ <operation>
+dst[63:48] := k1[15:0]
+dst[47:32] := k2[15:0]
+dst[31:0] := 0
+ </operation>
+ <instruction name="KCONCATH" form="r64, k, k"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_kconcatlo_64">
+ <type>Mask</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Mask</category>
+ <return type="__int64" varname="dst" etype="UI64"/>
+ <parameter type="__mmask16" varname="k1" etype="MASK"/>
+ <parameter type="__mmask16" varname="k2" etype="MASK"/>
+ <description>Packs masks "k1" and "k2" into the low 32 bits of "dst". The rest of "dst" is set to 0.</description>
+ <operation>
+dst[31:16] := k1[15:0]
+dst[15:0] := k2[15:0]
+dst[63:32] := 0
+ </operation>
+ <instruction name="KCONCATL" form="r64, k, k"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_kextract_64">
+ <type>Mask</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Mask</category>
+ <return type="__mmask16" varname="dst" etype="MASK"/>
+ <parameter type="__int64" varname="a" etype="UI64"/>
+ <parameter type="const int" varname="b" etype="UI32"/>
+ <description>Extracts one of four 16-bit fields from 64-bit integer "a", selected with "b", storing the result in "dst".</description>
+ <operation>
+CASE b[1:0] OF
+0: dst[15:0] := a[63:48]
+1: dst[15:0] := a[47:32]
+2: dst[15:0] := a[31:16]
+3: dst[15:0] := a[15:0]
+ESAC
+dst[MAX:16] := 0
+ </operation>
+ <instruction name="KEXTRACT" form="k, r64, imm8"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_fmadd233_ps">
+ <type>Floating Point</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <description>Multiply packed single-precision (32-bit) floating-point elements in each 4-element set of "a" and by element 1 of the corresponding 4-element set from "b", add the intermediate result to element 0 of the corresponding 4-element set from "b", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ base := (j &amp; ~0x3) * 32
+ scale[31:0] := b[base+63:base+32]
+ bias[31:0] := b[base+31:base]
+ dst[i+31:i] := (a[i+31:i] * scale[31:0]) + bias[31:0]
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VFMADD233PS" form="zmm, zmm, m512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_mask_fmadd233_ps">
+ <type>Floating Point</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="__m512" varname="b" etype="FP32"/>
+ <description>Multiply packed single-precision (32-bit) floating-point elements in each 4-element set of "a" and by element 1 of the corresponding 4-element set from "b", add the intermediate result to element 0 of the corresponding 4-element set from "b", and store the results in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ base := (j &amp; ~0x3) * 32
+ scale[31:0] := b[base+63:base+32]
+ bias[31:0] := b[base+31:base]
+ dst[i+31:i] := (a[i+31:i] * scale[31:0]) + bias[31:0]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VFMADD233PS" form="zmm {k}, zmm, m512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" sequence="TRUE" name="_mm512_i64extgather_epi32lo">
+ <type>Integer</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Load</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="vindex" etype="SI64"/>
+ <parameter type="void const*" varname="base_addr" etype="UI32"/>
+ <parameter type="_MM_UPCONV_EPI32_ENUM" varname="conv" etype="IMM" immtype="_MM_UPCONV_EPI32"/>
+ <parameter type="int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <parameter type="int" varname="hint" etype="UI32" hint="TRUE" immtype="_MM_HINT_EXT"/>
+ <description>Up-converts 8 32-bit integer memory locations starting at location "base_addr" at packed 64-bit integer indices stored in "vindex" scaled by "scale" using "conv" to 32-bit integer elements and stores them in "dst". "hint" indicates to the processor whether the data is non-temporal.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ m := j*64
+ addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8
+
+ CASE conv OF
+ _MM_UPCONV_EPI32_NONE: dst[i+31:i] := MEM[addr+31:addr]
+ _MM_UPCONV_EPI32_UINT8: dst[i+31:i] := ZeroExtend32(MEM[addr+7:addr])
+ _MM_UPCONV_EPI32_SINT8: dst[i+31:i] := SignExtend32(MEM[addr+7:addr])
+ _MM_UPCONV_EPI32_UINT16: dst[i+31:i] := ZeroExtend32(MEM[addr+15:addr])
+ _MM_UPCONV_EPI32_SINT16: dst[i+31:i] := SignExtend32(MEM[addr+15:addr])
+ ESAC
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" sequence="TRUE" name="_mm512_mask_i64extgather_epi32lo">
+ <type>Integer</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Load</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="vindex" etype="SI64"/>
+ <parameter type="void const*" varname="base_addr" etype="UI32"/>
+ <parameter type="_MM_UPCONV_EPI32_ENUM" varname="conv" etype="IMM" immtype="_MM_UPCONV_EPI32"/>
+ <parameter type="int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <parameter type="int" varname="hint" etype="UI32" hint="TRUE" immtype="_MM_HINT_EXT"/>
+ <description>Up-converts 8 32-bit integer memory locations starting at location "base_addr" at packed 64-bit integer indices stored in "vindex" scaled by "scale" using "conv" to 32-bit integer elements and stores them in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). "hint" indicates to the processor whether the data is non-temporal.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ m := j*64
+ addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8
+
+ IF k[j]
+ CASE conv OF
+ _MM_UPCONV_EPI32_NONE: dst[i+31:i] := MEM[addr+31:addr]
+ _MM_UPCONV_EPI32_UINT8: dst[i+31:i] := ZeroExtend32(MEM[addr+7:addr])
+ _MM_UPCONV_EPI32_SINT8: dst[i+31:i] := SignExtend32(MEM[addr+7:addr])
+ _MM_UPCONV_EPI32_UINT16: dst[i+31:i] := ZeroExtend32(MEM[addr+15:addr])
+ _MM_UPCONV_EPI32_SINT16: dst[i+31:i] := SignExtend32(MEM[addr+15:addr])
+ ESAC
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" sequence="TRUE" name="_mm512_i64extgather_epi64">
+ <type>Integer</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Load</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="vindex" etype="SI64"/>
+ <parameter type="void const*" varname="base_addr" etype="UI64"/>
+ <parameter type="_MM_UPCONV_EPI64_ENUM" varname="conv" etype="IMM" immtype="_MM_UPCONV_EPI64"/>
+ <parameter type="int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <parameter type="int" varname="hint" etype="UI32" hint="TRUE" immtype="_MM_HINT_EXT"/>
+ <description>Up-converts 8 double-precision (64-bit) memory locations starting at location "base_addr" at packed 64-bit integer indices stored in "vindex" scaled by "scale" using "conv" to 64-bit integer elements and stores them in "dst". "hint" indicates to the processor whether the load is non-temporal.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ m := j*64
+ addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8
+
+ CASE conv OF
+ _MM_UPCONV_EPI64_NONE: dst[i+63:i] := MEM[addr+63:addr]
+ ESAC
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" sequence="TRUE" name="_mm512_mask_i64extgather_epi64">
+ <type>Integer</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Load</category>
+ <return type="__m512i" varname="dst" etype="UI64"/>
+ <parameter type="__m512i" varname="src" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="vindex" etype="SI64"/>
+ <parameter type="void const*" varname="base_addr" etype="UI64"/>
+ <parameter type="_MM_UPCONV_EPI64_ENUM" varname="conv" etype="IMM" immtype="_MM_UPCONV_EPI64"/>
+ <parameter type="int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <parameter type="int" varname="hint" etype="UI32" hint="TRUE" immtype="_MM_HINT_EXT"/>
+ <description>Up-converts 8 double-precision (64-bit) memory locations starting at location "base_addr" at packed 64-bit integer indices stored in "vindex" scaled by "scale" using "conv" to 64-bit integer elements and stores them in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). "hint" indicates to the processor whether the load is non-temporal.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ m := j*64
+ addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8
+
+ IF k[j]
+ CASE conv OF
+ _MM_UPCONV_EPI64_NONE: dst[i+63:i] := MEM[addr+63:addr]
+ ESAC
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" sequence="TRUE" name="_mm512_i64extgather_pslo">
+ <type>Floating Point</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Load</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512i" varname="vindex" etype="SI64"/>
+ <parameter type="void const *" varname="base_addr" etype="FP32"/>
+ <parameter type="_MM_UPCONV_PS_ENUM" varname="conv" etype="IMM" immtype="_MM_UPCONV_PS"/>
+ <parameter type="int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <parameter type="int" varname="hint" etype="UI32" hint="TRUE" immtype="_MM_HINT_EXT"/>
+ <description>Up-converts 8 memory locations starting at location "base_addr" at packed 64-bit integer indices stored in "vindex" scaled by "scale" using "conv" to single-precision (32-bit) floating-point elements and stores them in the lower half of "dst". "hint" indicates to the processor whether the load is non-temporal.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ m := j*64
+ addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8
+
+ CASE conv OF
+ _MM_UPCONV_PS_NONE: dst[i+31:i] := MEM[addr+31:addr]
+ _MM_UPCONV_PS_FLOAT16: dst[i+31:i] := Convert_FP16_To_FP32(MEM[addr+15:addr])
+ _MM_UPCONV_PS_UINT8: dst[i+31:i] := Convert_UInt8_To_FP32(MEM[addr+7:addr])
+ _MM_UPCONV_PS_SINT8: dst[i+31:i] := Convert_Int8_To_FP32(MEM[addr+7:addr])
+ _MM_UPCONV_PS_UINT16: dst[i+31:i] := Convert_UInt16_To_FP32(MEM[addr+15:addr])
+ _MM_UPCONV_PS_SINT16: dst[i+31:i] := Convert_Int16_To_FP32(MEM[addr+15:addr])
+ ESAC
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" sequence="TRUE" name="_mm512_mask_i64extgather_pslo">
+ <type>Floating Point</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Load</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="vindex" etype="SI64"/>
+ <parameter type="void const *" varname="base_addr" etype="FP32"/>
+ <parameter type="_MM_UPCONV_PS_ENUM" varname="conv" etype="IMM" immtype="_MM_UPCONV_PS"/>
+ <parameter type="int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <parameter type="int" varname="hint" etype="UI32" hint="TRUE" immtype="_MM_HINT_EXT"/>
+ <description>Up-converts 8 memory locations starting at location "base_addr" at packed 64-bit integer indices stored in "vindex" scaled by "scale" using "conv" to single-precision (32-bit) floating-point elements and stores them in the lower half of "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). "hint" indicates to the processor whether the load is non-temporal.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ m := j*64
+ addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8
+
+ IF k[j]
+ CASE conv OF
+ _MM_UPCONV_PS_NONE: dst[i+31:i] := MEM[addr+31:addr]
+ _MM_UPCONV_PS_FLOAT16: dst[i+31:i] := Convert_FP16_To_FP32(MEM[addr+15:addr])
+ _MM_UPCONV_PS_UINT8: dst[i+31:i] := Convert_UInt8_To_FP32(MEM[addr+7:addr])
+ _MM_UPCONV_PS_SINT8: dst[i+31:i] := Convert_Int8_To_FP32(MEM[addr+7:addr])
+ _MM_UPCONV_PS_UINT16: dst[i+31:i] := Convert_UInt16_To_FP32(MEM[addr+15:addr])
+ _MM_UPCONV_PS_SINT16: dst[i+31:i] := Convert_Int16_To_FP32(MEM[addr+15:addr])
+ ESAC
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" sequence="TRUE" name="_mm512_i64extgather_pd">
+ <type>Floating Point</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Load</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512i" varname="vindex" etype="SI64"/>
+ <parameter type="void const *" varname="base_addr" etype="FP64"/>
+ <parameter type="_MM_UPCONV_PD_ENUM" varname="conv" etype="IMM" immtype="_MM_UPCONV_PD"/>
+ <parameter type="int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <parameter type="int" varname="hint" etype="UI32" hint="TRUE" immtype="_MM_HINT_EXT"/>
+ <description>Up-converts 8 double-precision (64-bit) floating-point elements stored in memory starting at location "base_addr" at packed 64-bit integer indices stored in "vindex" scaled by "scale" using "conv" to 64-bit floating-point elements and stores them in "dst". "hint" indicates to the processor whether the data is non-temporal.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ m := j*64
+ addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8
+
+ CASE conv OF
+ _MM_UPCONV_PD_NONE: dst[i+63:i] := MEM[addr+63:addr]
+ ESAC
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" sequence="TRUE" name="_mm512_mask_i64extgather_pd">
+ <type>Floating Point</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Load</category>
+ <return type="__m512d" varname="dst" etype="FP64"/>
+ <parameter type="__m512d" varname="src" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="vindex" etype="SI64"/>
+ <parameter type="void const *" varname="base_addr" etype="FP64"/>
+ <parameter type="_MM_UPCONV_PD_ENUM" varname="conv" etype="IMM" immtype="_MM_UPCONV_PD"/>
+ <parameter type="int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <parameter type="int" varname="hint" etype="UI32" hint="TRUE" immtype="_MM_HINT_EXT"/>
+ <description>Up-converts 8 double-precision (64-bit) floating-point elements stored in memory starting at location "base_addr" at packed 64-bit integer indices stored in "vindex" scaled by "scale" using "conv" to 64-bit floating-point elements and stores them in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set). "hint" indicates to the processor whether the data is non-temporal.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ m := j*64
+ addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8
+
+ IF k[j]
+ CASE conv OF
+ _MM_UPCONV_PD_NONE: dst[i+63:i] := MEM[addr+63:addr]
+ ESAC
+ ELSE
+ dst[i+63:i] := src[i+63:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" sequence="TRUE" name="_mm512_i64extscatter_pslo">
+ <type>Floating Point</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void *" varname="base_addr" etype="FP32"/>
+ <parameter type="__m512i" varname="vindex" etype="SI64"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="_MM_DOWNCONV_PS_ENUM" varname="conv" etype="IMM" immtype="_MM_DOWNCONV_PS"/>
+ <parameter type="int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <parameter type="int" varname="hint" etype="UI32" hint="TRUE" immtype="_MM_HINT_EXT"/>
+ <description>Down-converts 8 packed single-precision (32-bit) floating-point elements in "a" using "conv" and stores them in memory locations starting at location "base_addr" at packed 64-bit integer indices stored in "vindex" scaled by "scale". "hint" indicates to the processor whether the data is non-temporal.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ m := j*64
+ addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8
+
+ CASE conv OF
+ _MM_DOWNCONV_PS_NONE: MEM[addr+31:addr] := a[i+31:i]
+ _MM_DOWNCONV_PS_FLOAT16: MEM[addr+15:addr] := Convert_FP32_To_FP16(a[i+31:i])
+ _MM_DOWNCONV_PS_UINT8: MEM[addr+ 7:addr] := Convert_FP32_To_UInt8(a[i+31:i])
+ _MM_DOWNCONV_PS_SINT8: MEM[addr+ 7:addr] := Convert_FP32_To_Int8(a[i+31:i])
+ _MM_DOWNCONV_PS_UINT16: MEM[addr+15:addr] := Convert_FP32_To_UInt16(a[i+31:i])
+ _MM_DOWNCONV_PS_SINT16: MEM[addr+15:addr] := Convert_FP32_To_Int16(a[i+31:i])
+ ESAC
+ENDFOR
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" sequence="TRUE" name="_mm512_mask_i64extscatter_pslo">
+ <type>Floating Point</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void *" varname="base_addr" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="vindex" etype="SI64"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="_MM_DOWNCONV_PS_ENUM" varname="conv" etype="IMM" immtype="_MM_DOWNCONV_PS"/>
+ <parameter type="int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <parameter type="int" varname="hint" etype="UI32" hint="TRUE" immtype="_MM_HINT_EXT"/>
+ <description>Down-converts 8 packed single-precision (32-bit) floating-point elements in "a" using "conv" and stores them in memory locations starting at location "base_addr" at packed 64-bit integer indices stored in "vindex" scaled by "scale". Elements are only written when the corresponding mask bit is set in "k"; otherwise, elements are unchanged in memory. "hint" indicates to the processor whether the data is non-temporal.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ m := j*64
+ addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8
+
+ IF k[j]
+ CASE conv OF
+ _MM_DOWNCONV_PS_NONE: MEM[addr+31:addr] := a[i+31:i]
+ _MM_DOWNCONV_PS_FLOAT16: MEM[addr+15:addr] := Convert_FP32_To_FP16(a[i+31:i])
+ _MM_DOWNCONV_PS_UINT8: MEM[addr+ 7:addr] := Convert_FP32_To_UInt8(a[i+31:i])
+ _MM_DOWNCONV_PS_SINT8: MEM[addr+ 7:addr] := Convert_FP32_To_Int8(a[i+31:i])
+ _MM_DOWNCONV_PS_UINT16: MEM[addr+15:addr] := Convert_FP32_To_UInt16(a[i+31:i])
+ _MM_DOWNCONV_PS_SINT16: MEM[addr+15:addr] := Convert_FP32_To_Int16(a[i+31:i])
+ ESAC
+ FI
+ENDFOR
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" sequence="TRUE" name="_mm512_i64extscatter_pd">
+ <type>Floating Point</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void *" varname="base_addr" etype="FP64"/>
+ <parameter type="__m512i" varname="vindex" etype="SI64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="_MM_DOWNCONV_PD_ENUM" varname="conv" etype="IMM" immtype="_MM_DOWNCONV_PD"/>
+ <parameter type="int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <parameter type="int" varname="hint" etype="UI32" hint="TRUE" immtype="_MM_HINT_EXT"/>
+ <description>Down-converts 8 packed double-precision (64-bit) floating-point elements in "a" using "conv" and stores them in memory locations starting at location "base_addr" at packed 64-bit integer indices stored in "vindex" scaled by "scale". "hint" indicates to the processor whether the data is non-temporal.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ m := j*64
+ addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8
+
+ CASE conv OF
+ _MM_DOWNCONV_PD_NONE: MEM[addr+63:addr] := a[i+63:i]
+ ESAC
+ENDFOR
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" sequence="TRUE" name="_mm512_mask_i64extscatter_pd">
+ <type>Floating Point</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void *" varname="base_addr" etype="FP64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="vindex" etype="SI64"/>
+ <parameter type="__m512d" varname="a" etype="FP64"/>
+ <parameter type="_MM_DOWNCONV_PD_ENUM" varname="conv" etype="IMM" immtype="_MM_DOWNCONV_PD"/>
+ <parameter type="int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <parameter type="int" varname="hint" etype="UI32" hint="TRUE" immtype="_MM_HINT_EXT"/>
+ <description>Down-converts 8 packed double-precision (64-bit) floating-point elements in "a" using "conv" and stores them in memory locations starting at location "base_addr" at packed 64-bit integer indices stored in "vindex" scaled by "scale". Elements are written to memory using writemask "k" (elements are not stored to memory when the corresponding mask bit is not set; the memory location is left unchanged). "hint" indicates to the processor whether the data is non-temporal.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ m := j*64
+ addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8
+
+ IF k[j]
+ CASE conv OF
+ _MM_DOWNCONV_PD_NONE: MEM[addr+63:addr] := a[i+63:i]
+ ESAC
+ FI
+ENDFOR
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" sequence="TRUE" name="_mm512_i64extscatter_epi32lo">
+ <type>Integer</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void *" varname="base_addr" etype="UI32"/>
+ <parameter type="__m512i" varname="vindex" etype="SI64"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="_MM_DOWNCONV_EPI32_ENUM" varname="conv" etype="IMM" immtype="_MM_DOWNCONV_EPI32"/>
+ <parameter type="int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <parameter type="int" varname="hint" etype="UI32" hint="TRUE" immtype="_MM_HINT_EXT"/>
+ <description>Down-converts the low 8 packed 32-bit integer elements in "a" using "conv" and stores them in memory locations starting at location "base_addr" at packed 64-bit integer indices stored in "vindex" scaled by "scale". "hint" indicates to the processor whether the data is non-temporal.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ m := j*64
+ addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8
+
+ CASE conv OF
+ _MM_DOWNCONV_EPI32_NONE: MEM[addr+31:addr] := a[i+31:i]
+ _MM_DOWNCONV_EPI32_UINT8: MEM[addr+ 7:addr] := Truncate8(a[i+31:i])
+ _MM_DOWNCONV_EPI32_SINT8: MEM[addr+ 7:addr] := Saturate8(a[i+31:i])
+ _MM_DOWNCONV_EPI32_UINT16: MEM[addr+15:addr] := Truncate16(a[i+31:i])
+ _MM_DOWNCONV_EPI32_SINT16: MEM[addr+15:addr] := Saturate16(a[i+31:i])
+ ESAC
+ENDFOR
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" sequence="TRUE" name="_mm512_mask_i64extscatter_epi32lo">
+ <type>Integer</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void *" varname="base_addr" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="vindex" etype="SI64"/>
+ <parameter type="__m512i" varname="a" etype="UI32"/>
+ <parameter type="_MM_DOWNCONV_EPI32_ENUM" varname="conv" etype="IMM" immtype="_MM_DOWNCONV_EPI32"/>
+ <parameter type="int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <parameter type="int" varname="hint" etype="UI32" hint="TRUE" immtype="_MM_HINT_EXT"/>
+ <description>Down-converts the low 8 packed 32-bit integer elements in "a" using "conv" and stores them in memory locations starting at location "base_addr" at packed 64-bit integer indices stored in "vindex" scaled by "scale". Elements are written to memory using writemask "k" (elements are only written when the corresponding mask bit is set; otherwise, the memory location is left unchanged). "hint" indicates to the processor whether the data is non-temporal.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ m := j*64
+ addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8
+
+ IF k[j]
+ CASE conv OF
+ _MM_DOWNCONV_EPI32_NONE: MEM[addr+31:addr] := a[i+31:i]
+ _MM_DOWNCONV_EPI32_UINT8: MEM[addr+ 7:addr] := Truncate8(a[i+31:i])
+ _MM_DOWNCONV_EPI32_SINT8: MEM[addr+ 7:addr] := Saturate8(a[i+31:i])
+ _MM_DOWNCONV_EPI32_UINT16: MEM[addr+15:addr] := Truncate16(a[i+31:i])
+ _MM_DOWNCONV_EPI32_SINT16: MEM[addr+15:addr] := Saturate16(a[i+31:i])
+ ESAC
+ FI
+ENDFOR
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" sequence="TRUE" name="_mm512_i64extscatter_epi64">
+ <type>Integer</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void *" varname="base_addr" etype="UI64"/>
+ <parameter type="__m512i" varname="vindex" etype="SI64"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="_MM_DOWNCONV_EPI64_ENUM" varname="conv" etype="IMM" immtype="_MM_DOWNCONV_EPI64"/>
+ <parameter type="int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <parameter type="int" varname="hint" etype="UI32" hint="TRUE" immtype="_MM_HINT_EXT"/>
+ <description>Down-converts 8 packed 64-bit integer elements in "a" using "conv" and stores them in memory locations starting at location "base_addr" at packed 64-bit integer indices stored in "vindex" scaled by "scale". "hint" indicates to the processor whether the data is non-temporal.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ m := j*64
+ addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8
+
+ CASE conv OF
+ _MM_DOWNCONV_EPI64_NONE: MEM[addr+63:addr] := a[i+63:i]
+ ESAC
+ENDFOR
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" sequence="TRUE" name="_mm512_mask_i64extscatter_epi64">
+ <type>Integer</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void *" varname="base_addr" etype="UI64"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="vindex" etype="SI64"/>
+ <parameter type="__m512i" varname="a" etype="UI64"/>
+ <parameter type="_MM_DOWNCONV_EPI64_ENUM" varname="conv" etype="IMM" immtype="_MM_DOWNCONV_EPI64"/>
+ <parameter type="int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <parameter type="int" varname="hint" etype="UI32" hint="TRUE" immtype="_MM_HINT_EXT"/>
+ <description>Down-converts 8 packed 64-bit integer elements in "a" using "conv" and stores them in memory locations starting at location "base_addr" at packed 64-bit integer indices stored in "vindex" scaled by "scale". Only those elements whose corresponding mask bit is set in writemask "k" are written to memory.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*64
+ m := j*64
+ addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8
+
+ IF k[j]
+ CASE conv OF
+ _MM_DOWNCONV_EPI64_NONE: MEM[addr+63:addr] := a[i+63:i]
+ ESAC
+ FI
+ENDFOR
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_permute4f128_ps">
+ <type>Floating Point</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="_MM_PERM_ENUM" varname="imm8" etype="IMM" immtype="_MM_PERM"/>
+ <description>Permutes 128-bit blocks of the packed single-precision (32-bit) floating-point elements in "a" using constant "imm8". The results are stored in "dst".</description>
+ <operation>
+DEFINE SELECT4(src, control) {
+ CASE control[1:0] OF
+ 0: tmp[127:0] := src[127:0]
+ 1: tmp[127:0] := src[255:128]
+ 2: tmp[127:0] := src[383:256]
+ 3: tmp[127:0] := src[511:384]
+ ESAC
+ RETURN tmp[127:0]
+}
+FOR j := 0 to 3
+ i := j*128
+ n := j*2
+ dst[i+127:i] := SELECT4(a[511:0], imm8[n+1:n])
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPERMF32X4" form="zmm, m512, imm8"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_mask_permute4f128_ps">
+ <type>Floating Point</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Swizzle</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="_MM_PERM_ENUM" varname="imm8" etype="IMM" immtype="_MM_PERM"/>
+ <description>Permutes 128-bit blocks of the packed single-precision (32-bit) floating-point elements in "a" using constant "imm8". The results are stored in "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+DEFINE SELECT4(src, control) {
+ CASE control[1:0] OF
+ 0: tmp[127:0] := src[127:0]
+ 1: tmp[127:0] := src[255:128]
+ 2: tmp[127:0] := src[383:256]
+ 3: tmp[127:0] := src[511:384]
+ ESAC
+ RETURN tmp[127:0]
+}
+tmp[511:0] := 0
+FOR j := 0 to 3
+ i := j*128
+ n := j*2
+ tmp[i+127:i] := SELECT4(a[511:0], imm8[n+1:n])
+ENDFOR
+FOR j := 0 to 15
+ i := j*32
+ IF k[j]
+ dst[i+31:i] := tmp[i+31:i]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPERMF32X4" form="zmm {k}, m512, imm8"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" sequence="TRUE" name="_mm512_i64gather_epi32lo">
+ <type>Integer</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Load</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="vindex" etype="SI64"/>
+ <parameter type="void const *" varname="base_addr" etype="UI32"/>
+ <parameter type="int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <description>Loads 8 32-bit integer memory locations starting at location "base_addr" at packed 64-bit integer indices stored in "vindex" scaled by "scale" to "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ m := j*64
+ addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8
+ dst[i+31:i] := MEM[addr+31:addr]
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" sequence="TRUE" name="_mm512_mask_i64gather_epi32lo">
+ <type>Integer</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Load</category>
+ <return type="__m512i" varname="dst" etype="UI32"/>
+ <parameter type="__m512i" varname="src" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="vindex" etype="SI64"/>
+ <parameter type="void const *" varname="base_addr" etype="UI32"/>
+ <parameter type="int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <description>Loads 8 32-bit integer memory locations starting at location "base_addr" at packed 64-bit integer indices stored in "vindex" scaled by "scale" to "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ m := j*64
+ IF k[j]
+ addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8
+ dst[i+31:i] := MEM[addr+31:addr]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" sequence="TRUE" name="_mm512_i64gather_pslo">
+ <type>Floating Point</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Load</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512i" varname="vindex" etype="SI64"/>
+ <parameter type="void const *" varname="base_addr" etype="FP32"/>
+ <parameter type="int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <description>Loads 8 single-precision (32-bit) floating-point memory locations starting at location "base_addr" at packed 64-bit integer indices stored in "vindex" scaled by "scale" to "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ m := j*64
+ addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8
+ dst[i+31:i] := MEM[addr+31:addr]
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" sequence="TRUE" name="_mm512_mask_i64gather_pslo">
+ <type>Floating Point</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Load</category>
+ <return type="__m512" varname="dst" etype="FP32"/>
+ <parameter type="__m512" varname="src" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="vindex" etype="SI64"/>
+ <parameter type="void const *" varname="base_addr" etype="FP32"/>
+ <parameter type="int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <description>Loads 8 single-precision (32-bit) floating-point memory locations starting at location "base_addr" at packed 64-bit integer indices stored in "vindex" scaled by "scale" to "dst" using writemask "k" (elements are copied from "src" when the corresponding mask bit is not set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ m := j*64
+ IF k[j]
+ addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8
+ dst[i+31:i] := MEM[addr+31:addr]
+ ELSE
+ dst[i+31:i] := src[i+31:i]
+ FI
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" sequence="TRUE" name="_mm512_i64scatter_pslo">
+ <type>Floating Point</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void *" varname="base_addr" etype="FP32"/>
+ <parameter type="__m512i" varname="vindex" etype="SI64"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <description>Stores 8 packed single-precision (32-bit) floating-point elements in "a" in memory locations starting at location "base_addr" at packed 64-bit integer indices stored in "vindex" scaled by "scale".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ m := j*64
+ addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8
+ MEM[addr+31:addr] := a[i+31:i]
+ENDFOR
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" sequence="TRUE" name="_mm512_mask_i64scatter_pslo">
+ <type>Floating Point</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void *" varname="base_addr" etype="FP32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="vindex" etype="SI64"/>
+ <parameter type="__m512" varname="a" etype="FP32"/>
+ <parameter type="int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <description>Stores 8 packed single-precision (32-bit) floating-point elements in "a" in memory locations starting at location "base_addr" at packed 64-bit integer indices stored in "vindex" scaled by "scale" using writemask "k" (elements are only written to memory when the corresponding mask bit is set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ m := j*64
+ IF k[j]
+ addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8
+ MEM[addr+31:addr] := a[i+31:i]
+ FI
+ENDFOR
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" sequence="TRUE" name="_mm512_i64scatter_epi32lo">
+ <type>Integer</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void *" varname="base_addr" etype="UI32"/>
+ <parameter type="__m512i" varname="vindex" etype="SI64"/>
+ <parameter type="__m512i" varname="a" etype="M512"/>
+ <parameter type="int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <description>Stores 8 packed 32-bit integer elements in "a" in memory locations starting at location "base_addr" at packed 64-bit integer indices stored in "vindex" scaled by "scale".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ m := j*64
+ addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8
+ MEM[addr+31:addr] := a[i+31:i]
+ENDFOR
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" sequence="TRUE" name="_mm512_mask_i64scatter_epi32lo">
+ <type>Integer</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void *" varname="base_addr" etype="UI32"/>
+ <parameter type="__mmask8" varname="k" etype="MASK"/>
+ <parameter type="__m512i" varname="vindex" etype="SI64"/>
+ <parameter type="__m512i" varname="a" etype="M512"/>
+ <parameter type="int" varname="scale" etype="IMM" immtype="_MM_INDEX_SCALE"/>
+ <description>Stores 8 packed 32-bit integer elements in "a" in memory locations starting at location "base_addr" at packed 64-bit integer indices stored in "vindex" scaled by "scale" using writemask "k" (elements are only written to memory when the corresponding mask bit is set).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*32
+ m := j*64
+ IF k[j]
+ addr := base_addr + vindex[m+63:m] * ZeroExtend64(scale) * 8
+ MEM[addr+31:addr] := a[i+31:i]
+ FI
+ENDFOR
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_kmerge2l1h">
+ <type>Mask</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Mask</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__mmask16" varname="k1" etype="MASK"/>
+ <parameter type="__mmask16" varname="k2" etype="MASK"/>
+ <description>Move the high element from "k1" to the low element of "k1", and insert the low element of "k2" into the high element of "k1".</description>
+ <operation>
+tmp[7:0] := k1[15:8]
+k1[15:8] := k2[7:0]
+k1[7:0] := tmp[7:0]
+ </operation>
+ <instruction name="KMERGE2L1H" form="k, k"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="KNC" name="_mm512_kmerge2l1l">
+ <type>Mask</type>
+ <CPUID>KNCNI</CPUID>
+ <category>Mask</category>
+ <return type="__mmask16" varname="k" etype="MASK"/>
+ <parameter type="__mmask16" varname="k1" etype="MASK"/>
+ <parameter type="__mmask16" varname="k2" etype="MASK"/>
+ <description>Insert the low element of "k2" into the high element of "k1".</description>
+ <operation>
+k1[15:8] := k2[7:0]
+ </operation>
+ <instruction name="KMERGE2L1L" form="k, k"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_lzcnt_u32">
+ <type>Integer</type>
+ <CPUID>LZCNT</CPUID>
+ <category>Bit Manipulation</category>
+ <return type="unsigned int" varname="dst" etype="UI32"/>
+ <parameter type="unsigned int" varname="a" etype="UI32"/>
+ <description>Count the number of leading zero bits in unsigned 32-bit integer "a", and return that count in "dst".</description>
+ <operation>
+tmp := 31
+dst := 0
+DO WHILE (tmp &gt;= 0 AND a[tmp] == 0)
+ tmp := tmp - 1
+ dst := dst + 1
+OD
+ </operation>
+ <instruction name="LZCNT" form="r32, r32" xed="LZCNT_GPRv_GPRv"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_lzcnt_u64">
+ <type>Integer</type>
+ <CPUID>LZCNT</CPUID>
+ <category>Bit Manipulation</category>
+ <return type="unsigned __int64" varname="dst" etype="UI64"/>
+ <parameter type="unsigned __int64" varname="a" etype="UI64"/>
+ <description>Count the number of leading zero bits in unsigned 64-bit integer "a", and return that count in "dst".</description>
+ <operation>
+tmp := 63
+dst := 0
+DO WHILE (tmp &gt;= 0 AND a[tmp] == 0)
+ tmp := tmp - 1
+ dst := dst + 1
+OD
+ </operation>
+ <instruction name="LZCNT" form="r64, r64" xed="LZCNT_GPRv_GPRv"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="MMX" name="_m_from_int64">
+ <type>Integer</type>
+ <CPUID>MMX</CPUID>
+ <category>Convert</category>
+ <return type="__m64" varname="dst" etype="FP32"/>
+ <parameter type="__int64" varname="a" etype="UI64"/>
+ <description>Copy 64-bit integer "a" to "dst".</description>
+ <operation>
+dst[63:0] := a[63:0]
+ </operation>
+ <instruction name="MOVQ" form="mm, r64" xed="MOVQ_MMXq_GPR64"/>
+ <header>mmintrin.h</header>
+</intrinsic>
+<intrinsic tech="MMX" name="_m_to_int64">
+ <type>Integer</type>
+ <CPUID>MMX</CPUID>
+ <category>Convert</category>
+ <return type="__int64" varname="dst" etype="UI64"/>
+ <parameter type="__m64" varname="a" etype="FP32"/>
+ <description>Copy 64-bit integer "a" to "dst".</description>
+ <operation>
+dst[63:0] := a[63:0]
+ </operation>
+ <instruction name="MOVQ" form="r64, mm" xed="MOVQ_GPR64_MMXq"/>
+ <header>mmintrin.h</header>
+</intrinsic>
+<intrinsic tech="MMX" name="_m_empty">
+ <CPUID>MMX</CPUID>
+ <category>General Support</category>
+ <return type="void"/>
+ <parameter type="void"/>
+ <description>Empty the MMX state, which marks the x87 FPU registers as available for use by x87 instructions. This instruction must be used at the end of all MMX technology procedures.</description>
+ <instruction name="EMMS" xed="EMMS"/>
+ <header>mmintrin.h</header>
+</intrinsic>
+<intrinsic tech="MMX" name="_m_from_int">
+ <type>Integer</type>
+ <CPUID>MMX</CPUID>
+ <category>Convert</category>
+ <return type="__m64" varname="dst" etype="FP32"/>
+ <parameter type="int" varname="a" etype="UI32"/>
+ <description>Copy 32-bit integer "a" to the lower elements of "dst", and zero the upper element of "dst".</description>
+ <operation>
+dst[31:0] := a[31:0]
+dst[63:32] := 0
+ </operation>
+ <instruction name="MOVD" form="mm, r32" xed="MOVD_MMXq_GPR32"/>
+ <header>mmintrin.h</header>
+</intrinsic>
+<intrinsic tech="MMX" name="_m_to_int">
+ <type>Integer</type>
+ <CPUID>MMX</CPUID>
+ <category>Convert</category>
+ <return type="int" varname="dst" etype="UI32"/>
+ <parameter type="__m64" varname="a" etype="FP32"/>
+ <description>Copy the lower 32-bit integer in "a" to "dst".</description>
+ <operation>
+dst[31:0] := a[31:0]
+ </operation>
+ <instruction name="MOVD" form="r32, mm" xed="MOVD_GPR32_MMXd"/>
+ <header>mmintrin.h</header>
+</intrinsic>
+<intrinsic tech="MMX" name="_m_packsswb">
+ <type>Integer</type>
+ <CPUID>MMX</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m64" varname="dst" etype="SI8"/>
+ <parameter type="__m64" varname="a" etype="SI16"/>
+ <parameter type="__m64" varname="b" etype="SI16"/>
+ <description>Convert packed signed 16-bit integers from "a" and "b" to packed 8-bit integers using signed saturation, and store the results in "dst".</description>
+ <operation>
+dst[7:0] := Saturate8(a[15:0])
+dst[15:8] := Saturate8(a[31:16])
+dst[23:16] := Saturate8(a[47:32])
+dst[31:24] := Saturate8(a[63:48])
+dst[39:32] := Saturate8(b[15:0])
+dst[47:40] := Saturate8(b[31:16])
+dst[55:48] := Saturate8(b[47:32])
+dst[63:56] := Saturate8(b[63:48])
+ </operation>
+ <instruction name="PACKSSWB" form="mm, mm" xed="PACKSSWB_MMXq_MMXq"/>
+ <header>mmintrin.h</header>
+</intrinsic>
+<intrinsic tech="MMX" name="_m_packssdw">
+ <type>Integer</type>
+ <CPUID>MMX</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m64" varname="dst" etype="SI16"/>
+ <parameter type="__m64" varname="a" etype="SI32"/>
+ <parameter type="__m64" varname="b" etype="SI32"/>
+ <description>Convert packed signed 32-bit integers from "a" and "b" to packed 16-bit integers using signed saturation, and store the results in "dst".</description>
+ <operation>
+dst[15:0] := Saturate16(a[31:0])
+dst[31:16] := Saturate16(a[63:32])
+dst[47:32] := Saturate16(b[31:0])
+dst[63:48] := Saturate16(b[63:32])
+ </operation>
+ <instruction name="PACKSSDW" form="mm, mm" xed="PACKSSDW_MMXq_MMXq"/>
+ <header>mmintrin.h</header>
+</intrinsic>
+<intrinsic tech="MMX" name="_m_packuswb">
+ <type>Integer</type>
+ <CPUID>MMX</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m64" varname="dst" etype="UI8"/>
+ <parameter type="__m64" varname="a" etype="SI16"/>
+ <parameter type="__m64" varname="b" etype="SI16"/>
+ <description>Convert packed signed 16-bit integers from "a" and "b" to packed 8-bit integers using unsigned saturation, and store the results in "dst".</description>
+ <operation>
+dst[7:0] := SaturateU8(a[15:0])
+dst[15:8] := SaturateU8(a[31:16])
+dst[23:16] := SaturateU8(a[47:32])
+dst[31:24] := SaturateU8(a[63:48])
+dst[39:32] := SaturateU8(b[15:0])
+dst[47:40] := SaturateU8(b[31:16])
+dst[55:48] := SaturateU8(b[47:32])
+dst[63:56] := SaturateU8(b[63:48])
+ </operation>
+ <instruction name="PACKUSWB" form="mm, mm" xed="PACKUSWB_MMXq_MMXq"/>
+ <header>mmintrin.h</header>
+</intrinsic>
+<intrinsic tech="MMX" name="_m_punpckhbw">
+ <type>Integer</type>
+ <CPUID>MMX</CPUID>
+ <category>Swizzle</category>
+ <return type="__m64" varname="dst" etype="FP32"/>
+ <parameter type="__m64" varname="a" etype="UI64"/>
+ <parameter type="__m64" varname="b" etype="UI64"/>
+ <description>Unpack and interleave 8-bit integers from the high half of "a" and "b", and store the results in "dst".</description>
+ <operation>
+DEFINE INTERLEAVE_HIGH_BYTES(src1[63:0], src2[63:0]) {
+ dst[7:0] := src1[39:32]
+ dst[15:8] := src2[39:32]
+ dst[23:16] := src1[47:40]
+ dst[31:24] := src2[47:40]
+ dst[39:32] := src1[55:48]
+ dst[47:40] := src2[55:48]
+ dst[55:48] := src1[63:56]
+ dst[63:56] := src2[63:56]
+ RETURN dst[63:0]
+}
+dst[63:0] := INTERLEAVE_HIGH_BYTES(a[63:0], b[63:0])
+ </operation>
+ <instruction name="PUNPCKHBW" form="mm, mm" xed="PUNPCKHBW_MMXq_MMXd"/>
+ <header>mmintrin.h</header>
+</intrinsic>
+<intrinsic tech="MMX" name="_m_punpckhwd">
+ <type>Integer</type>
+ <CPUID>MMX</CPUID>
+ <category>Swizzle</category>
+ <return type="__m64" varname="dst" etype="FP32"/>
+ <parameter type="__m64" varname="a" etype="UI64"/>
+ <parameter type="__m64" varname="b" etype="UI64"/>
+ <description>Unpack and interleave 16-bit integers from the high half of "a" and "b", and store the results in "dst".</description>
+ <operation>
+DEFINE INTERLEAVE_HIGH_WORDS(src1[63:0], src2[63:0]) {
+ dst[15:0] := src1[47:32]
+ dst[31:16] := src2[47:32]
+ dst[47:32] := src1[63:48]
+ dst[63:48] := src2[63:48]
+ RETURN dst[63:0]
+}
+dst[63:0] := INTERLEAVE_HIGH_WORDS(a[63:0], b[63:0])
+ </operation>
+ <instruction name="PUNPCKHWD" form="mm, mm" xed="PUNPCKHWD_MMXq_MMXd"/>
+ <header>mmintrin.h</header>
+</intrinsic>
+<intrinsic tech="MMX" name="_m_punpckhdq">
+ <type>Integer</type>
+ <CPUID>MMX</CPUID>
+ <category>Swizzle</category>
+ <return type="__m64" varname="dst" etype="FP32"/>
+ <parameter type="__m64" varname="a" etype="UI64"/>
+ <parameter type="__m64" varname="b" etype="UI64"/>
+ <description>Unpack and interleave 32-bit integers from the high half of "a" and "b", and store the results in "dst".</description>
+ <operation>
+dst[31:0] := a[63:32]
+dst[63:32] := b[63:32]
+ </operation>
+ <instruction name="PUNPCKHDQ" form="mm, mm" xed="PUNPCKHDQ_MMXq_MMXd"/>
+ <header>mmintrin.h</header>
+</intrinsic>
+<intrinsic tech="MMX" name="_m_punpcklbw">
+ <type>Integer</type>
+ <CPUID>MMX</CPUID>
+ <category>Swizzle</category>
+ <return type="__m64" varname="dst" etype="FP32"/>
+ <parameter type="__m64" varname="a" etype="UI64"/>
+ <parameter type="__m64" varname="b" etype="UI64"/>
+ <description>Unpack and interleave 8-bit integers from the low half of "a" and "b", and store the results in "dst".</description>
+ <operation>
+DEFINE INTERLEAVE_BYTES(src1[63:0], src2[63:0]) {
+ dst[7:0] := src1[7:0]
+ dst[15:8] := src2[7:0]
+ dst[23:16] := src1[15:8]
+ dst[31:24] := src2[15:8]
+ dst[39:32] := src1[23:16]
+ dst[47:40] := src2[23:16]
+ dst[55:48] := src1[31:24]
+ dst[63:56] := src2[31:24]
+ RETURN dst[63:0]
+}
+dst[63:0] := INTERLEAVE_BYTES(a[63:0], b[63:0])
+ </operation>
+ <instruction name="PUNPCKLBW" form="mm, mm" xed="PUNPCKLBW_MMXq_MMXd"/>
+ <header>mmintrin.h</header>
+</intrinsic>
+<intrinsic tech="MMX" name="_m_punpcklwd">
+ <type>Integer</type>
+ <CPUID>MMX</CPUID>
+ <category>Swizzle</category>
+ <return type="__m64" varname="dst" etype="FP32"/>
+ <parameter type="__m64" varname="a" etype="UI64"/>
+ <parameter type="__m64" varname="b" etype="UI64"/>
+ <description>Unpack and interleave 16-bit integers from the low half of "a" and "b", and store the results in "dst".</description>
+ <operation>
+DEFINE INTERLEAVE_WORDS(src1[63:0], src2[63:0]) {
+ dst[15:0] := src1[15:0]
+ dst[31:16] := src2[15:0]
+ dst[47:32] := src1[31:16]
+ dst[63:48] := src2[31:16]
+ RETURN dst[63:0]
+}
+dst[63:0] := INTERLEAVE_WORDS(a[63:0], b[63:0])
+ </operation>
+ <instruction name="PUNPCKLWD" form="mm, mm" xed="PUNPCKLWD_MMXq_MMXd"/>
+ <header>mmintrin.h</header>
+</intrinsic>
+<intrinsic tech="MMX" name="_m_punpckldq">
+ <type>Integer</type>
+ <CPUID>MMX</CPUID>
+ <category>Swizzle</category>
+ <return type="__m64" varname="dst" etype="FP32"/>
+ <parameter type="__m64" varname="a" etype="UI64"/>
+ <parameter type="__m64" varname="b" etype="UI64"/>
+ <description>Unpack and interleave 32-bit integers from the low half of "a" and "b", and store the results in "dst".</description>
+ <operation>
+dst[31:0] := a[31:0]
+dst[63:32] := b[31:0]
+ </operation>
+ <instruction name="PUNPCKLDQ" form="mm, mm" xed="PUNPCKLDQ_MMXq_MMXd"/>
+ <header>mmintrin.h</header>
+</intrinsic>
+<intrinsic tech="MMX" name="_m_paddb">
+ <type>Integer</type>
+ <CPUID>MMX</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m64" varname="dst" etype="FP32"/>
+ <parameter type="__m64" varname="a" etype="UI64"/>
+ <parameter type="__m64" varname="b" etype="UI64"/>
+ <description>Add packed 8-bit integers in "a" and "b", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*8
+ dst[i+7:i] := a[i+7:i] + b[i+7:i]
+ENDFOR
+ </operation>
+ <instruction name="PADDB" form="mm, mm" xed="PADDB_MMXq_MMXq"/>
+ <header>mmintrin.h</header>
+</intrinsic>
+<intrinsic tech="MMX" name="_m_paddw">
+ <type>Integer</type>
+ <CPUID>MMX</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m64" varname="dst" etype="FP32"/>
+ <parameter type="__m64" varname="a" etype="UI64"/>
+ <parameter type="__m64" varname="b" etype="UI64"/>
+ <description>Add packed 16-bit integers in "a" and "b", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*16
+ dst[i+15:i] := a[i+15:i] + b[i+15:i]
+ENDFOR
+ </operation>
+ <instruction name="PADDW" form="mm, mm" xed="PADDW_MMXq_MMXq"/>
+ <header>mmintrin.h</header>
+</intrinsic>
+<intrinsic tech="MMX" name="_m_paddd">
+ <type>Integer</type>
+ <CPUID>MMX</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m64" varname="dst" etype="FP32"/>
+ <parameter type="__m64" varname="a" etype="UI64"/>
+ <parameter type="__m64" varname="b" etype="UI64"/>
+ <description>Add packed 32-bit integers in "a" and "b", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*32
+ dst[i+31:i] := a[i+31:i] + b[i+31:i]
+ENDFOR
+ </operation>
+ <instruction name="PADDD" form="mm, mm" xed="PADDD_MMXq_MMXq"/>
+ <header>mmintrin.h</header>
+</intrinsic>
+<intrinsic tech="MMX" name="_m_paddsb">
+ <type>Integer</type>
+ <CPUID>MMX</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m64" varname="dst" etype="FP32"/>
+ <parameter type="__m64" varname="a" etype="SI64"/>
+ <parameter type="__m64" varname="b" etype="SI64"/>
+ <description>Add packed signed 8-bit integers in "a" and "b" using saturation, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*8
+ dst[i+7:i] := Saturate8( a[i+7:i] + b[i+7:i] )
+ENDFOR
+ </operation>
+ <instruction name="PADDSB" form="mm, mm" xed="PADDSB_MMXq_MMXq"/>
+ <header>mmintrin.h</header>
+</intrinsic>
+<intrinsic tech="MMX" name="_m_paddsw">
+ <type>Integer</type>
+ <CPUID>MMX</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m64" varname="dst" etype="FP32"/>
+ <parameter type="__m64" varname="a" etype="SI64"/>
+ <parameter type="__m64" varname="b" etype="SI64"/>
+ <description>Add packed signed 16-bit integers in "a" and "b" using saturation, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*16
+ dst[i+15:i] := Saturate16( a[i+15:i] + b[i+15:i] )
+ENDFOR
+ </operation>
+ <instruction name="PADDSW" form="mm, mm" xed="PADDSW_MMXq_MMXq"/>
+ <header>mmintrin.h</header>
+</intrinsic>
+<intrinsic tech="MMX" name="_m_paddusb">
+ <type>Integer</type>
+ <CPUID>MMX</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m64" varname="dst" etype="FP32"/>
+ <parameter type="__m64" varname="a" etype="UI64"/>
+ <parameter type="__m64" varname="b" etype="UI64"/>
+ <description>Add packed unsigned 8-bit integers in "a" and "b" using saturation, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*8
+ dst[i+7:i] := SaturateU8( a[i+7:i] + b[i+7:i] )
+ENDFOR
+ </operation>
+ <instruction name="PADDUSB" form="mm, mm" xed="PADDUSB_MMXq_MMXq"/>
+ <header>mmintrin.h</header>
+</intrinsic>
+<intrinsic tech="MMX" name="_m_paddusw">
+ <type>Integer</type>
+ <CPUID>MMX</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m64" varname="dst" etype="FP32"/>
+ <parameter type="__m64" varname="a" etype="UI64"/>
+ <parameter type="__m64" varname="b" etype="UI64"/>
+ <description>Add packed unsigned 16-bit integers in "a" and "b" using saturation, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*16
+ dst[i+15:i] := SaturateU16( a[i+15:i] + b[i+15:i] )
+ENDFOR
+ </operation>
+ <instruction name="PADDUSW" form="mm, mm" xed="PADDUSW_MMXq_MMXq"/>
+ <header>mmintrin.h</header>
+</intrinsic>
+<intrinsic tech="MMX" name="_m_psubb">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>MMX</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m64" varname="dst" etype="FP32"/>
+ <parameter type="__m64" varname="a" etype="UI64"/>
+ <parameter type="__m64" varname="b" etype="UI64"/>
+ <description>Subtract packed 8-bit integers in "b" from packed 8-bit integers in "a", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*8
+ dst[i+7:i] := a[i+7:i] - b[i+7:i]
+ENDFOR
+ </operation>
+ <instruction name="PSUBB" form="mm, mm" xed="PSUBB_MMXq_MMXq"/>
+ <header>mmintrin.h</header>
+</intrinsic>
+<intrinsic tech="MMX" name="_m_psubw">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>MMX</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m64" varname="dst" etype="FP32"/>
+ <parameter type="__m64" varname="a" etype="UI64"/>
+ <parameter type="__m64" varname="b" etype="UI64"/>
+ <description>Subtract packed 16-bit integers in "b" from packed 16-bit integers in "a", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*16
+ dst[i+15:i] := a[i+15:i] - b[i+15:i]
+ENDFOR
+ </operation>
+ <instruction name="PSUBW" form="mm, mm" xed="PSUBW_MMXq_MMXq"/>
+ <header>mmintrin.h</header>
+</intrinsic>
+<intrinsic tech="MMX" name="_m_psubd">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>MMX</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m64" varname="dst" etype="FP32"/>
+ <parameter type="__m64" varname="a" etype="UI64"/>
+ <parameter type="__m64" varname="b" etype="UI64"/>
+ <description>Subtract packed 32-bit integers in "b" from packed 32-bit integers in "a", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*32
+ dst[i+31:i] := a[i+31:i] - b[i+31:i]
+ENDFOR
+ </operation>
+ <instruction name="PSUBD" form="mm, mm" xed="PSUBD_MMXq_MMXq"/>
+ <header>mmintrin.h</header>
+</intrinsic>
+<intrinsic tech="MMX" name="_m_psubsb">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>MMX</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m64" varname="dst" etype="FP32"/>
+ <parameter type="__m64" varname="a" etype="SI64"/>
+ <parameter type="__m64" varname="b" etype="SI64"/>
+ <description>Subtract packed signed 8-bit integers in "b" from packed 8-bit integers in "a" using saturation, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*8
+ dst[i+7:i] := Saturate8(a[i+7:i] - b[i+7:i])
+ENDFOR
+ </operation>
+ <instruction name="PSUBSB" form="mm, mm" xed="PSUBSB_MMXq_MMXq"/>
+ <header>mmintrin.h</header>
+</intrinsic>
+<intrinsic tech="MMX" name="_m_psubsw">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>MMX</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m64" varname="dst" etype="FP32"/>
+ <parameter type="__m64" varname="a" etype="SI64"/>
+ <parameter type="__m64" varname="b" etype="SI64"/>
+ <description>Subtract packed signed 16-bit integers in "b" from packed 16-bit integers in "a" using saturation, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*16
+ dst[i+15:i] := Saturate16(a[i+15:i] - b[i+15:i])
+ENDFOR
+ </operation>
+ <instruction name="PSUBSW" form="mm, mm" xed="PSUBSW_MMXq_MMXq"/>
+ <header>mmintrin.h</header>
+</intrinsic>
+<intrinsic tech="MMX" name="_m_psubusb">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>MMX</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m64" varname="dst" etype="FP32"/>
+ <parameter type="__m64" varname="a" etype="UI64"/>
+ <parameter type="__m64" varname="b" etype="UI64"/>
+ <description>Subtract packed unsigned 8-bit integers in "b" from packed unsigned 8-bit integers in "a" using saturation, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*8
+ dst[i+7:i] := SaturateU8(a[i+7:i] - b[i+7:i])
+ENDFOR
+ </operation>
+ <instruction name="PSUBUSB" form="mm, mm" xed="PSUBUSB_MMXq_MMXq"/>
+ <header>mmintrin.h</header>
+</intrinsic>
+<intrinsic tech="MMX" name="_m_psubusw">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>MMX</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m64" varname="dst" etype="FP32"/>
+ <parameter type="__m64" varname="a" etype="UI64"/>
+ <parameter type="__m64" varname="b" etype="UI64"/>
+ <description>Subtract packed unsigned 16-bit integers in "b" from packed unsigned 16-bit integers in "a" using saturation, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*16
+ dst[i+15:i] := SaturateU16(a[i+15:i] - b[i+15:i])
+ENDFOR
+ </operation>
+ <instruction name="PSUBUSW" form="mm, mm" xed="PSUBUSW_MMXq_MMXq"/>
+ <header>mmintrin.h</header>
+</intrinsic>
+<intrinsic tech="MMX" name="_m_pmaddwd">
+ <type>Integer</type>
+ <CPUID>MMX</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m64" varname="dst" etype="FP32"/>
+ <parameter type="__m64" varname="a" etype="SI64"/>
+ <parameter type="__m64" varname="b" etype="SI64"/>
+ <description>Multiply packed signed 16-bit integers in "a" and "b", producing intermediate signed 32-bit integers. Horizontally add adjacent pairs of intermediate 32-bit integers, and pack the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*32
+ dst[i+31:i] := SignExtend32(a[i+31:i+16]*b[i+31:i+16]) + SignExtend32(a[i+15:i]*b[i+15:i])
+ENDFOR
+ </operation>
+ <instruction name="PMADDWD" form="mm, mm" xed="PMADDWD_MMXq_MMXq"/>
+ <header>mmintrin.h</header>
+</intrinsic>
+<intrinsic tech="MMX" name="_m_pmulhw">
+ <type>Integer</type>
+ <CPUID>MMX</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m64" varname="dst" etype="FP32"/>
+ <parameter type="__m64" varname="a" etype="SI64"/>
+ <parameter type="__m64" varname="b" etype="SI64"/>
+ <description>Multiply the packed signed 16-bit integers in "a" and "b", producing intermediate 32-bit integers, and store the high 16 bits of the intermediate integers in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*16
+ tmp[31:0] := SignExtend32(a[i+15:i]) * SignExtend32(b[i+15:i])
+ dst[i+15:i] := tmp[31:16]
+ENDFOR
+ </operation>
+ <instruction name="PMULHW" form="mm, mm" xed="PMULHW_MMXq_MMXq"/>
+ <header>mmintrin.h</header>
+</intrinsic>
+<intrinsic tech="MMX" name="_m_pmullw">
+ <type>Integer</type>
+ <CPUID>MMX</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m64" varname="dst" etype="FP32"/>
+ <parameter type="__m64" varname="a" etype="UI64"/>
+ <parameter type="__m64" varname="b" etype="UI64"/>
+ <description>Multiply the packed 16-bit integers in "a" and "b", producing intermediate 32-bit integers, and store the low 16 bits of the intermediate integers in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*16
+ tmp[31:0] := a[i+15:i] * b[i+15:i]
+ dst[i+15:i] := tmp[15:0]
+ENDFOR
+ </operation>
+ <instruction name="PMULLW" form="mm, mm" xed="PMULLW_MMXq_MMXq"/>
+ <header>mmintrin.h</header>
+</intrinsic>
+<intrinsic tech="MMX" name="_m_psllw">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>MMX</CPUID>
+ <category>Shift</category>
+ <return type="__m64" varname="dst" etype="FP32"/>
+ <parameter type="__m64" varname="a" etype="UI64"/>
+ <parameter type="__m64" varname="count" etype="UI64"/>
+ <description>Shift packed 16-bit integers in "a" left by "count" while shifting in zeros, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*16
+ IF count[63:0] &gt; 15
+ dst[i+15:i] := 0
+ ELSE
+ dst[i+15:i] := ZeroExtend16(a[i+15:i] &lt;&lt; count[63:0])
+ FI
+ENDFOR
+ </operation>
+ <instruction name="PSLLW" form="mm, mm" xed="PSLLW_MMXq_MMXq"/>
+ <header>mmintrin.h</header>
+</intrinsic>
+<intrinsic tech="MMX" name="_m_psllwi">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>MMX</CPUID>
+ <category>Shift</category>
+ <return type="__m64" varname="dst" etype="FP32"/>
+ <parameter type="__m64" varname="a" etype="UI64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shift packed 16-bit integers in "a" left by "imm8" while shifting in zeros, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*16
+ IF imm8[7:0] &gt; 15
+ dst[i+15:i] := 0
+ ELSE
+ dst[i+15:i] := ZeroExtend16(a[i+15:i] &lt;&lt; imm8[7:0])
+ FI
+ENDFOR
+ </operation>
+ <instruction name="PSLLW" form="mm, imm8" xed="PSLLW_MMXq_IMMb"/>
+ <header>mmintrin.h</header>
+</intrinsic>
+<intrinsic tech="MMX" name="_m_pslld">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>MMX</CPUID>
+ <category>Shift</category>
+ <return type="__m64" varname="dst" etype="FP32"/>
+ <parameter type="__m64" varname="a" etype="UI64"/>
+ <parameter type="__m64" varname="count" etype="UI64"/>
+ <description>Shift packed 32-bit integers in "a" left by "count" while shifting in zeros, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*32
+ IF count[63:0] &gt; 31
+ dst[i+31:i] := 0
+ ELSE
+ dst[i+31:i] := ZeroExtend32(a[i+31:i] &lt;&lt; count[63:0])
+ FI
+ENDFOR
+ </operation>
+ <instruction name="PSLLD" form="mm, mm" xed="PSLLD_MMXq_MMXq"/>
+ <header>mmintrin.h</header>
+</intrinsic>
+<intrinsic tech="MMX" name="_m_pslldi">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>MMX</CPUID>
+ <category>Shift</category>
+ <return type="__m64" varname="dst" etype="FP32"/>
+ <parameter type="__m64" varname="a" etype="UI64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shift packed 32-bit integers in "a" left by "imm8" while shifting in zeros, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*32
+ IF imm8[7:0] &gt; 31
+ dst[i+31:i] := 0
+ ELSE
+ dst[i+31:i] := ZeroExtend32(a[i+31:i] &lt;&lt; imm8[7:0])
+ FI
+ENDFOR
+ </operation>
+ <instruction name="PSLLD" form="mm, imm8" xed="PSLLD_MMXq_IMMb"/>
+ <header>mmintrin.h</header>
+</intrinsic>
+<intrinsic tech="MMX" name="_m_psllq">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>MMX</CPUID>
+ <category>Shift</category>
+ <return type="__m64" varname="dst" etype="FP32"/>
+ <parameter type="__m64" varname="a" etype="UI64"/>
+ <parameter type="__m64" varname="count" etype="UI64"/>
+ <description>Shift 64-bit integer "a" left by "count" while shifting in zeros, and store the result in "dst".</description>
+ <operation>
+IF count[63:0] &gt; 63
+ dst[63:0] := 0
+ELSE
+ dst[63:0] := ZeroExtend64(a[63:0] &lt;&lt; count[63:0])
+FI
+ </operation>
+ <instruction name="PSLLQ" form="mm, mm" xed="PSLLQ_MMXq_MMXq"/>
+ <header>mmintrin.h</header>
+</intrinsic>
+<intrinsic tech="MMX" name="_m_psllqi">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>MMX</CPUID>
+ <category>Shift</category>
+ <return type="__m64" varname="dst" etype="FP32"/>
+ <parameter type="__m64" varname="a" etype="UI64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shift 64-bit integer "a" left by "imm8" while shifting in zeros, and store the result in "dst".</description>
+ <operation>
+IF imm8[7:0] &gt; 63
+ dst[63:0] := 0
+ELSE
+ dst[63:0] := ZeroExtend64(a[63:0] &lt;&lt; imm8[7:0])
+FI
+ </operation>
+ <instruction name="PSLLQ" form="mm, imm8" xed="PSLLQ_MMXq_IMMb"/>
+ <header>mmintrin.h</header>
+</intrinsic>
+<intrinsic tech="MMX" name="_m_psraw">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>MMX</CPUID>
+ <category>Shift</category>
+ <return type="__m64" varname="dst" etype="FP32"/>
+ <parameter type="__m64" varname="a" etype="UI64"/>
+ <parameter type="__m64" varname="count" etype="UI64"/>
+ <description>Shift packed 16-bit integers in "a" right by "count" while shifting in sign bits, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*16
+ IF count[63:0] &gt; 15
+ dst[i+15:i] := (a[i+15] ? 0xFFFF : 0x0)
+ ELSE
+ dst[i+15:i] := SignExtend16(a[i+15:i] &gt;&gt; count[63:0])
+ FI
+ENDFOR
+ </operation>
+ <instruction name="PSRAW" form="mm, mm" xed="PSRAW_MMXq_MMXq"/>
+ <header>mmintrin.h</header>
+</intrinsic>
+<intrinsic tech="MMX" name="_m_psrawi">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>MMX</CPUID>
+ <category>Shift</category>
+ <return type="__m64" varname="dst" etype="FP32"/>
+ <parameter type="__m64" varname="a" etype="SI64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shift packed 16-bit integers in "a" right by "imm8" while shifting in sign bits, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*16
+ IF imm8[7:0] &gt; 15
+ dst[i+15:i] := (a[i+15] ? 0xFFFF : 0x0)
+ ELSE
+ dst[i+15:i] := SignExtend16(a[i+15:i] &gt;&gt; imm8[7:0])
+ FI
+ENDFOR
+ </operation>
+ <instruction name="PSRAW" form="mm, imm8" xed="PSRAW_MMXq_IMMb"/>
+ <header>mmintrin.h</header>
+</intrinsic>
+<intrinsic tech="MMX" name="_m_psrad">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>MMX</CPUID>
+ <category>Shift</category>
+ <return type="__m64" varname="dst" etype="FP32"/>
+ <parameter type="__m64" varname="a" etype="SI64"/>
+ <parameter type="__m64" varname="count" etype="UI64"/>
+ <description>Shift packed 32-bit integers in "a" right by "count" while shifting in sign bits, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*32
+ IF count[63:0] &gt; 31
+ dst[i+31:i] := (a[i+31] ? 0xFFFFFFFF : 0x0)
+ ELSE
+ dst[i+31:i] := SignExtend32(a[i+31:i] &gt;&gt; count[63:0])
+ FI
+ENDFOR
+ </operation>
+ <instruction name="PSRAD" form="mm, mm" xed="PSRAD_MMXq_MMXq"/>
+ <header>mmintrin.h</header>
+</intrinsic>
+<intrinsic tech="MMX" name="_m_psradi">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>MMX</CPUID>
+ <category>Shift</category>
+ <return type="__m64" varname="dst" etype="FP32"/>
+ <parameter type="__m64" varname="a" etype="SI64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shift packed 32-bit integers in "a" right by "imm8" while shifting in sign bits, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*32
+ IF imm8[7:0] &gt; 31
+ dst[i+31:i] := (a[i+31] ? 0xFFFFFFFF : 0x0)
+ ELSE
+ dst[i+31:i] := SignExtend32(a[i+31:i] &gt;&gt; imm8[7:0])
+ FI
+ENDFOR
+ </operation>
+ <instruction name="PSRAD" form="mm, imm8" xed="PSRAD_MMXq_IMMb"/>
+ <header>mmintrin.h</header>
+</intrinsic>
+<intrinsic tech="MMX" name="_m_psrlw">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>MMX</CPUID>
+ <category>Shift</category>
+ <return type="__m64" varname="dst" etype="FP32"/>
+ <parameter type="__m64" varname="a" etype="UI64"/>
+ <parameter type="__m64" varname="count" etype="UI64"/>
+ <description>Shift packed 16-bit integers in "a" right by "count" while shifting in zeros, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*16
+ IF count[63:0] &gt; 15
+ dst[i+15:i] := 0
+ ELSE
+ dst[i+15:i] := ZeroExtend16(a[i+15:i] &gt;&gt; count[63:0])
+ FI
+ENDFOR
+ </operation>
+ <instruction name="PSRLW" form="mm, mm" xed="PSRLW_MMXq_MMXq"/>
+ <header>mmintrin.h</header>
+</intrinsic>
+<intrinsic tech="MMX" name="_m_psrlwi">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>MMX</CPUID>
+ <category>Shift</category>
+ <return type="__m64" varname="dst" etype="FP32"/>
+ <parameter type="__m64" varname="a" etype="UI64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shift packed 16-bit integers in "a" right by "imm8" while shifting in zeros, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*16
+ IF imm8[7:0] &gt; 15
+ dst[i+15:i] := 0
+ ELSE
+ dst[i+15:i] := ZeroExtend16(a[i+15:i] &gt;&gt; imm8[7:0])
+ FI
+ENDFOR
+ </operation>
+ <instruction name="PSRLW" form="mm, imm8" xed="PSRLW_MMXq_IMMb"/>
+ <header>mmintrin.h</header>
+</intrinsic>
+<intrinsic tech="MMX" name="_m_psrld">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>MMX</CPUID>
+ <category>Shift</category>
+ <return type="__m64" varname="dst" etype="FP32"/>
+ <parameter type="__m64" varname="a" etype="UI64"/>
+ <parameter type="__m64" varname="count" etype="UI64"/>
+ <description>Shift packed 32-bit integers in "a" right by "count" while shifting in zeros, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*32
+ IF count[63:0] &gt; 31
+ dst[i+31:i] := 0
+ ELSE
+ dst[i+31:i] := ZeroExtend32(a[i+31:i] &gt;&gt; count[63:0])
+ FI
+ENDFOR
+ </operation>
+ <instruction name="PSRLD" form="mm, mm" xed="PSRLD_MMXq_MMXq"/>
+ <header>mmintrin.h</header>
+</intrinsic>
+<intrinsic tech="MMX" name="_m_psrldi">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>MMX</CPUID>
+ <category>Shift</category>
+ <return type="__m64" varname="dst" etype="FP32"/>
+ <parameter type="__m64" varname="a" etype="UI64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shift packed 32-bit integers in "a" right by "imm8" while shifting in zeros, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*32
+ IF imm8[7:0] &gt; 31
+ dst[i+31:i] := 0
+ ELSE
+ dst[i+31:i] := ZeroExtend32(a[i+31:i] &gt;&gt; imm8[7:0])
+ FI
+ENDFOR
+ </operation>
+ <instruction name="PSRLD" form="mm, imm8" xed="PSRLD_MMXq_IMMb"/>
+ <header>mmintrin.h</header>
+</intrinsic>
+<intrinsic tech="MMX" name="_m_psrlq">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>MMX</CPUID>
+ <category>Shift</category>
+ <return type="__m64" varname="dst" etype="FP32"/>
+ <parameter type="__m64" varname="a" etype="UI64"/>
+ <parameter type="__m64" varname="count" etype="UI64"/>
+ <description>Shift 64-bit integer "a" right by "count" while shifting in zeros, and store the result in "dst".</description>
+ <operation>
+IF count[63:0] &gt; 63
+ dst[63:0] := 0
+ELSE
+ dst[63:0] := ZeroExtend64(a[63:0] &gt;&gt; count[63:0])
+FI
+ </operation>
+ <instruction name="PSRLQ" form="mm, mm" xed="PSRLQ_MMXq_MMXq"/>
+ <header>mmintrin.h</header>
+</intrinsic>
+<intrinsic tech="MMX" name="_m_psrlqi">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>MMX</CPUID>
+ <category>Shift</category>
+ <return type="__m64" varname="dst" etype="FP32"/>
+ <parameter type="__m64" varname="a" etype="UI64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shift 64-bit integer "a" right by "imm8" while shifting in zeros, and store the result in "dst".</description>
+ <operation>
+IF imm8[7:0] &gt; 63
+ dst[63:0] := 0
+ELSE
+ dst[63:0] := ZeroExtend64(a[63:0] &gt;&gt; imm8[7:0])
+FI
+ </operation>
+ <instruction name="PSRLQ" form="mm, imm8" xed="PSRLQ_MMXq_IMMb"/>
+ <header>mmintrin.h</header>
+</intrinsic>
+<intrinsic tech="MMX" name="_m_pand">
+ <type>Integer</type>
+ <CPUID>MMX</CPUID>
+ <category>Logical</category>
+ <return type="__m64" varname="dst" etype="FP32"/>
+ <parameter type="__m64" varname="a" etype="UI64"/>
+ <parameter type="__m64" varname="b" etype="UI64"/>
+ <description>Compute the bitwise AND of 64 bits (representing integer data) in "a" and "b", and store the result in "dst".</description>
+ <operation>
+dst[63:0] := (a[63:0] AND b[63:0])
+ </operation>
+ <instruction name="PAND" form="mm, mm" xed="PAND_MMXq_MMXq"/>
+ <header>mmintrin.h</header>
+</intrinsic>
+<intrinsic tech="MMX" name="_m_pandn">
+ <type>Integer</type>
+ <CPUID>MMX</CPUID>
+ <category>Logical</category>
+ <return type="__m64" varname="dst" etype="FP32"/>
+ <parameter type="__m64" varname="a" etype="UI64"/>
+ <parameter type="__m64" varname="b" etype="UI64"/>
+ <description>Compute the bitwise NOT of 64 bits (representing integer data) in "a" and then AND with "b", and store the result in "dst".</description>
+ <operation>
+dst[63:0] := ((NOT a[63:0]) AND b[63:0])
+ </operation>
+ <instruction name="PANDN" form="mm, mm" xed="PANDN_MMXq_MMXq"/>
+ <header>mmintrin.h</header>
+</intrinsic>
+<intrinsic tech="MMX" name="_m_por">
+ <type>Integer</type>
+ <CPUID>MMX</CPUID>
+ <category>Logical</category>
+ <return type="__m64" varname="dst" etype="FP32"/>
+ <parameter type="__m64" varname="a" etype="UI64"/>
+ <parameter type="__m64" varname="b" etype="UI64"/>
+ <description>Compute the bitwise OR of 64 bits (representing integer data) in "a" and "b", and store the result in "dst".</description>
+ <operation>
+dst[63:0] := (a[63:0] OR b[63:0])
+ </operation>
+ <instruction name="POR" form="mm, mm" xed="POR_MMXq_MMXq"/>
+ <header>mmintrin.h</header>
+</intrinsic>
+<intrinsic tech="MMX" name="_m_pxor">
+ <type>Integer</type>
+ <CPUID>MMX</CPUID>
+ <category>Logical</category>
+ <return type="__m64" varname="dst" etype="FP32"/>
+ <parameter type="__m64" varname="a" etype="UI64"/>
+ <parameter type="__m64" varname="b" etype="UI64"/>
+ <description>Compute the bitwise XOR of 64 bits (representing integer data) in "a" and "b", and store the result in "dst".</description>
+ <operation>
+dst[63:0] := (a[63:0] XOR b[63:0])
+ </operation>
+ <instruction name="PXOR" form="mm, mm" xed="PXOR_MMXq_MMXq"/>
+ <header>mmintrin.h</header>
+</intrinsic>
+<intrinsic tech="MMX" name="_m_pcmpeqb">
+ <type>Integer</type>
+ <CPUID>MMX</CPUID>
+ <category>Compare</category>
+ <return type="__m64" varname="dst" etype="FP32"/>
+ <parameter type="__m64" varname="a" etype="UI64"/>
+ <parameter type="__m64" varname="b" etype="UI64"/>
+ <description>Compare packed 8-bit integers in "a" and "b" for equality, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*8
+ dst[i+7:i] := ( a[i+7:i] == b[i+7:i] ) ? 0xFF : 0
+ENDFOR
+ </operation>
+ <instruction name="PCMPEQB" form="mm, mm" xed="PCMPEQB_MMXq_MMXq"/>
+ <header>mmintrin.h</header>
+</intrinsic>
+<intrinsic tech="MMX" name="_m_pcmpeqw">
+ <type>Integer</type>
+ <CPUID>MMX</CPUID>
+ <category>Compare</category>
+ <return type="__m64" varname="dst" etype="FP32"/>
+ <parameter type="__m64" varname="a" etype="UI64"/>
+ <parameter type="__m64" varname="b" etype="UI64"/>
+ <description>Compare packed 16-bit integers in "a" and "b" for equality, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*16
+ dst[i+15:i] := ( a[i+15:i] == b[i+15:i] ) ? 0xFFFF : 0
+ENDFOR
+ </operation>
+ <instruction name="PCMPEQW" form="mm, mm" xed="PCMPEQW_MMXq_MMXq"/>
+ <header>mmintrin.h</header>
+</intrinsic>
+<intrinsic tech="MMX" name="_m_pcmpeqd">
+ <type>Integer</type>
+ <CPUID>MMX</CPUID>
+ <category>Compare</category>
+ <return type="__m64" varname="dst" etype="FP32"/>
+ <parameter type="__m64" varname="a" etype="UI64"/>
+ <parameter type="__m64" varname="b" etype="UI64"/>
+ <description>Compare packed 32-bit integers in "a" and "b" for equality, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*32
+ dst[i+31:i] := ( a[i+31:i] == b[i+31:i] ) ? 0xFFFFFFFF : 0
+ENDFOR
+ </operation>
+ <instruction name="PCMPEQD" form="mm, mm" xed="PCMPEQD_MMXq_MMXq"/>
+ <header>mmintrin.h</header>
+</intrinsic>
+<intrinsic tech="MMX" name="_m_pcmpgtb">
+ <type>Integer</type>
+ <CPUID>MMX</CPUID>
+ <category>Compare</category>
+ <return type="__m64" varname="dst" etype="FP32"/>
+ <parameter type="__m64" varname="a" etype="SI64"/>
+ <parameter type="__m64" varname="b" etype="SI64"/>
+ <description>Compare packed 8-bit integers in "a" and "b" for greater-than, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*8
+ dst[i+7:i] := ( a[i+7:i] &gt; b[i+7:i] ) ? 0xFF : 0
+ENDFOR
+ </operation>
+ <instruction name="PCMPGTB" form="mm, mm" xed="PCMPGTB_MMXq_MMXq"/>
+ <header>mmintrin.h</header>
+</intrinsic>
+<intrinsic tech="MMX" name="_m_pcmpgtw">
+ <type>Integer</type>
+ <CPUID>MMX</CPUID>
+ <category>Compare</category>
+ <return type="__m64" varname="dst" etype="FP32"/>
+ <parameter type="__m64" varname="a" etype="SI64"/>
+ <parameter type="__m64" varname="b" etype="SI64"/>
+ <description>Compare packed 16-bit integers in "a" and "b" for greater-than, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*16
+ dst[i+15:i] := ( a[i+15:i] &gt; b[i+15:i] ) ? 0xFFFF : 0
+ENDFOR
+ </operation>
+ <instruction name="PCMPGTW" form="mm, mm" xed="PCMPGTW_MMXq_MMXq"/>
+ <header>mmintrin.h</header>
+</intrinsic>
+<intrinsic tech="MMX" name="_m_pcmpgtd">
+ <type>Integer</type>
+ <CPUID>MMX</CPUID>
+ <category>Compare</category>
+ <return type="__m64" varname="dst" etype="FP32"/>
+ <parameter type="__m64" varname="a" etype="SI64"/>
+ <parameter type="__m64" varname="b" etype="SI64"/>
+ <description>Compare packed 32-bit integers in "a" and "b" for greater-than, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*32
+ dst[i+31:i] := ( a[i+31:i] &gt; b[i+31:i] ) ? 0xFFFFFFFF : 0
+ENDFOR
+ </operation>
+ <instruction name="PCMPGTD" form="mm, mm" xed="PCMPGTD_MMXq_MMXq"/>
+ <header>mmintrin.h</header>
+</intrinsic>
+<intrinsic tech="MMX" name="_mm_empty">
+ <CPUID>MMX</CPUID>
+ <category>General Support</category>
+ <return type="void"/>
+ <parameter type="void"/>
+ <description>Empty the MMX state, which marks the x87 FPU registers as available for use by x87 instructions. This instruction must be used at the end of all MMX technology procedures.</description>
+ <instruction name="EMMS" xed="EMMS"/>
+ <header>mmintrin.h</header>
+</intrinsic>
+<intrinsic tech="MMX" name="_mm_add_pi8">
+ <type>Integer</type>
+ <CPUID>MMX</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m64" varname="dst" etype="FP32"/>
+ <parameter type="__m64" varname="a" etype="UI8"/>
+ <parameter type="__m64" varname="b" etype="UI8"/>
+ <description>Add packed 8-bit integers in "a" and "b", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*8
+ dst[i+7:i] := a[i+7:i] + b[i+7:i]
+ENDFOR
+ </operation>
+ <instruction name="PADDB" form="mm, mm" xed="PADDB_MMXq_MMXq"/>
+ <header>mmintrin.h</header>
+</intrinsic>
+<intrinsic tech="MMX" name="_mm_add_pi16">
+ <type>Integer</type>
+ <CPUID>MMX</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m64" varname="dst" etype="FP32"/>
+ <parameter type="__m64" varname="a" etype="UI16"/>
+ <parameter type="__m64" varname="b" etype="UI16"/>
+ <description>Add packed 16-bit integers in "a" and "b", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*16
+ dst[i+15:i] := a[i+15:i] + b[i+15:i]
+ENDFOR
+ </operation>
+ <instruction name="PADDW" form="mm, mm" xed="PADDW_MMXq_MMXq"/>
+ <header>mmintrin.h</header>
+</intrinsic>
+<intrinsic tech="MMX" name="_mm_add_pi32">
+ <type>Integer</type>
+ <CPUID>MMX</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m64" varname="dst" etype="FP32"/>
+ <parameter type="__m64" varname="a" etype="UI32"/>
+ <parameter type="__m64" varname="b" etype="UI32"/>
+ <description>Add packed 32-bit integers in "a" and "b", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*32
+ dst[i+31:i] := a[i+31:i] + b[i+31:i]
+ENDFOR
+ </operation>
+ <instruction name="PADDD" form="mm, mm" xed="PADDD_MMXq_MMXq"/>
+ <header>mmintrin.h</header>
+</intrinsic>
+<intrinsic tech="MMX" name="_mm_adds_pi8">
+ <type>Integer</type>
+ <CPUID>MMX</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m64" varname="dst" etype="FP32"/>
+ <parameter type="__m64" varname="a" etype="SI8"/>
+ <parameter type="__m64" varname="b" etype="SI8"/>
+ <description>Add packed signed 8-bit integers in "a" and "b" using saturation, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*8
+ dst[i+7:i] := Saturate8( a[i+7:i] + b[i+7:i] )
+ENDFOR
+ </operation>
+ <instruction name="PADDSB" form="mm, mm" xed="PADDSB_MMXq_MMXq"/>
+ <header>mmintrin.h</header>
+</intrinsic>
+<intrinsic tech="MMX" name="_mm_adds_pi16">
+ <type>Integer</type>
+ <CPUID>MMX</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m64" varname="dst" etype="FP32"/>
+ <parameter type="__m64" varname="a" etype="SI16"/>
+ <parameter type="__m64" varname="b" etype="SI16"/>
+ <description>Add packed signed 16-bit integers in "a" and "b" using saturation, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*16
+ dst[i+15:i] := Saturate16( a[i+15:i] + b[i+15:i] )
+ENDFOR
+ </operation>
+ <instruction name="PADDSW" form="mm, mm" xed="PADDSW_MMXq_MMXq"/>
+ <header>mmintrin.h</header>
+</intrinsic>
+<intrinsic tech="MMX" name="_mm_adds_pu8">
+ <type>Integer</type>
+ <CPUID>MMX</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m64" varname="dst" etype="FP32"/>
+ <parameter type="__m64" varname="a" etype="UI8"/>
+ <parameter type="__m64" varname="b" etype="UI8"/>
+ <description>Add packed unsigned 8-bit integers in "a" and "b" using saturation, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*8
+ dst[i+7:i] := SaturateU8( a[i+7:i] + b[i+7:i] )
+ENDFOR
+ </operation>
+ <instruction name="PADDUSB" form="mm, mm" xed="PADDUSB_MMXq_MMXq"/>
+ <header>mmintrin.h</header>
+</intrinsic>
+<intrinsic tech="MMX" name="_mm_adds_pu16">
+ <type>Integer</type>
+ <CPUID>MMX</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m64" varname="dst" etype="FP32"/>
+ <parameter type="__m64" varname="a" etype="UI16"/>
+ <parameter type="__m64" varname="b" etype="UI16"/>
+ <description>Add packed unsigned 16-bit integers in "a" and "b" using saturation, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*16
+ dst[i+15:i] := SaturateU16( a[i+15:i] + b[i+15:i] )
+ENDFOR
+ </operation>
+ <instruction name="PADDUSW" form="mm, mm" xed="PADDUSW_MMXq_MMXq"/>
+ <header>mmintrin.h</header>
+</intrinsic>
+<intrinsic tech="MMX" name="_mm_sub_pi8">
+ <type>Integer</type>
+ <CPUID>MMX</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m64" varname="dst" etype="FP32"/>
+ <parameter type="__m64" varname="a" etype="UI8"/>
+ <parameter type="__m64" varname="b" etype="UI8"/>
+ <description>Subtract packed 8-bit integers in "b" from packed 8-bit integers in "a", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*8
+ dst[i+7:i] := a[i+7:i] - b[i+7:i]
+ENDFOR
+ </operation>
+ <instruction name="PSUBB" form="mm, mm" xed="PSUBB_MMXq_MMXq"/>
+ <header>mmintrin.h</header>
+</intrinsic>
+<intrinsic tech="MMX" name="_mm_sub_pi16">
+ <type>Integer</type>
+ <CPUID>MMX</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m64" varname="dst" etype="FP32"/>
+ <parameter type="__m64" varname="a" etype="UI16"/>
+ <parameter type="__m64" varname="b" etype="UI16"/>
+ <description>Subtract packed 16-bit integers in "b" from packed 16-bit integers in "a", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*16
+ dst[i+15:i] := a[i+15:i] - b[i+15:i]
+ENDFOR
+ </operation>
+ <instruction name="PSUBW" form="mm, mm" xed="PSUBW_MMXq_MMXq"/>
+ <header>mmintrin.h</header>
+</intrinsic>
+<intrinsic tech="MMX" name="_mm_sub_pi32">
+ <type>Integer</type>
+ <CPUID>MMX</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m64" varname="dst" etype="FP32"/>
+ <parameter type="__m64" varname="a" etype="UI32"/>
+ <parameter type="__m64" varname="b" etype="UI32"/>
+ <description>Subtract packed 32-bit integers in "b" from packed 32-bit integers in "a", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*32
+ dst[i+31:i] := a[i+31:i] - b[i+31:i]
+ENDFOR
+ </operation>
+ <instruction name="PSUBD" form="mm, mm" xed="PSUBD_MMXq_MMXq"/>
+ <header>mmintrin.h</header>
+</intrinsic>
+<intrinsic tech="MMX" name="_mm_subs_pi8">
+ <type>Integer</type>
+ <CPUID>MMX</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m64" varname="dst" etype="FP32"/>
+ <parameter type="__m64" varname="a" etype="SI8"/>
+ <parameter type="__m64" varname="b" etype="SI8"/>
+ <description>Subtract packed signed 8-bit integers in "b" from packed 8-bit integers in "a" using saturation, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*8
+ dst[i+7:i] := Saturate8(a[i+7:i] - b[i+7:i])
+ENDFOR
+ </operation>
+ <instruction name="PSUBSB" form="mm, mm" xed="PSUBSB_MMXq_MMXq"/>
+ <header>mmintrin.h</header>
+</intrinsic>
+<intrinsic tech="MMX" name="_mm_subs_pi16">
+ <type>Integer</type>
+ <CPUID>MMX</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m64" varname="dst" etype="FP32"/>
+ <parameter type="__m64" varname="a" etype="SI16"/>
+ <parameter type="__m64" varname="b" etype="SI16"/>
+ <description>Subtract packed signed 16-bit integers in "b" from packed 16-bit integers in "a" using saturation, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*16
+ dst[i+15:i] := Saturate16(a[i+15:i] - b[i+15:i])
+ENDFOR
+ </operation>
+ <instruction name="PSUBSW" form="mm, mm" xed="PSUBSW_MMXq_MMXq"/>
+ <header>mmintrin.h</header>
+</intrinsic>
+<intrinsic tech="MMX" name="_mm_subs_pu8">
+ <type>Integer</type>
+ <CPUID>MMX</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m64" varname="dst" etype="FP32"/>
+ <parameter type="__m64" varname="a" etype="UI8"/>
+ <parameter type="__m64" varname="b" etype="UI8"/>
+ <description>Subtract packed unsigned 8-bit integers in "b" from packed unsigned 8-bit integers in "a" using saturation, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*8
+ dst[i+7:i] := SaturateU8(a[i+7:i] - b[i+7:i])
+ENDFOR
+ </operation>
+ <instruction name="PSUBUSB" form="mm, mm" xed="PSUBUSB_MMXq_MMXq"/>
+ <header>mmintrin.h</header>
+</intrinsic>
+<intrinsic tech="MMX" name="_mm_subs_pu16">
+ <type>Integer</type>
+ <CPUID>MMX</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m64" varname="dst" etype="FP32"/>
+ <parameter type="__m64" varname="a" etype="UI16"/>
+ <parameter type="__m64" varname="b" etype="UI16"/>
+ <description>Subtract packed unsigned 16-bit integers in "b" from packed unsigned 16-bit integers in "a" using saturation, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*16
+ dst[i+15:i] := SaturateU16(a[i+15:i] - b[i+15:i])
+ENDFOR
+ </operation>
+ <instruction name="PSUBUSW" form="mm, mm" xed="PSUBUSW_MMXq_MMXq"/>
+ <header>mmintrin.h</header>
+</intrinsic>
+<intrinsic tech="MMX" name="_mm_madd_pi16">
+ <type>Integer</type>
+ <CPUID>MMX</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m64" varname="dst" etype="FP32"/>
+ <parameter type="__m64" varname="a" etype="SI16"/>
+ <parameter type="__m64" varname="b" etype="SI16"/>
+ <description>Multiply packed signed 16-bit integers in "a" and "b", producing intermediate signed 32-bit integers. Horizontally add adjacent pairs of intermediate 32-bit integers, and pack the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*32
+ dst[i+31:i] := SignExtend32(a[i+31:i+16]*b[i+31:i+16]) + SignExtend32(a[i+15:i]*b[i+15:i])
+ENDFOR
+ </operation>
+ <instruction name="PMADDWD" form="mm, mm" xed="PMADDWD_MMXq_MMXq"/>
+ <header>mmintrin.h</header>
+</intrinsic>
+<intrinsic tech="MMX" name="_mm_mulhi_pi16">
+ <type>Integer</type>
+ <CPUID>MMX</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m64" varname="dst" etype="FP32"/>
+ <parameter type="__m64" varname="a" etype="SI16"/>
+ <parameter type="__m64" varname="b" etype="SI16"/>
+ <description>Multiply the packed signed 16-bit integers in "a" and "b", producing intermediate 32-bit integers, and store the high 16 bits of the intermediate integers in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*16
+ tmp[31:0] := SignExtend32(a[i+15:i]) * SignExtend32(b[i+15:i])
+ dst[i+15:i] := tmp[31:16]
+ENDFOR
+ </operation>
+ <instruction name="PMULHW" form="mm, mm" xed="PMULHW_MMXq_MMXq"/>
+ <header>mmintrin.h</header>
+</intrinsic>
+<intrinsic tech="MMX" name="_mm_mullo_pi16">
+ <type>Integer</type>
+ <CPUID>MMX</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m64" varname="dst" etype="FP32"/>
+ <parameter type="__m64" varname="a" etype="UI16"/>
+ <parameter type="__m64" varname="b" etype="UI16"/>
+ <description>Multiply the packed 16-bit integers in "a" and "b", producing intermediate 32-bit integers, and store the low 16 bits of the intermediate integers in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*16
+ tmp[31:0] := a[i+15:i] * b[i+15:i]
+ dst[i+15:i] := tmp[15:0]
+ENDFOR
+ </operation>
+ <instruction name="PMULLW" form="mm, mm" xed="PMULLW_MMXq_MMXq"/>
+ <header>mmintrin.h</header>
+</intrinsic>
+<intrinsic tech="MMX" name="_mm_sll_pi16">
+ <type>Integer</type>
+ <CPUID>MMX</CPUID>
+ <category>Shift</category>
+ <return type="__m64" varname="dst" etype="FP32"/>
+ <parameter type="__m64" varname="a" etype="UI16"/>
+ <parameter type="__m64" varname="count" etype="UI16"/>
+ <description>Shift packed 16-bit integers in "a" left by "count" while shifting in zeros, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*16
+ IF count[63:0] &gt; 15
+ dst[i+15:i] := 0
+ ELSE
+ dst[i+15:i] := ZeroExtend16(a[i+15:i] &lt;&lt; count[63:0])
+ FI
+ENDFOR
+ </operation>
+ <instruction name="PSLLW" form="mm, mm" xed="PSLLW_MMXq_MMXq"/>
+ <header>mmintrin.h</header>
+</intrinsic>
+<intrinsic tech="MMX" name="_mm_slli_pi16">
+ <type>Integer</type>
+ <CPUID>MMX</CPUID>
+ <category>Shift</category>
+ <return type="__m64" varname="dst" etype="FP32"/>
+ <parameter type="__m64" varname="a" etype="UI16"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shift packed 16-bit integers in "a" left by "imm8" while shifting in zeros, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*16
+ IF imm8[7:0] &gt; 15
+ dst[i+15:i] := 0
+ ELSE
+ dst[i+15:i] := ZeroExtend16(a[i+15:i] &lt;&lt; imm8[7:0])
+ FI
+ENDFOR
+ </operation>
+ <instruction name="PSLLW" form="mm, imm8" xed="PSLLW_MMXq_IMMb"/>
+ <header>mmintrin.h</header>
+</intrinsic>
+<intrinsic tech="MMX" name="_mm_sll_pi32">
+ <type>Integer</type>
+ <CPUID>MMX</CPUID>
+ <category>Shift</category>
+ <return type="__m64" varname="dst" etype="FP32"/>
+ <parameter type="__m64" varname="a" etype="UI32"/>
+ <parameter type="__m64" varname="count" etype="UI32"/>
+ <description>Shift packed 32-bit integers in "a" left by "count" while shifting in zeros, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*32
+ IF count[63:0] &gt; 31
+ dst[i+31:i] := 0
+ ELSE
+ dst[i+31:i] := ZeroExtend32(a[i+31:i] &lt;&lt; count[63:0])
+ FI
+ENDFOR
+ </operation>
+ <instruction name="PSLLD" form="mm, mm" xed="PSLLD_MMXq_MMXq"/>
+ <header>mmintrin.h</header>
+</intrinsic>
+<intrinsic tech="MMX" name="_mm_slli_pi32">
+ <type>Integer</type>
+ <CPUID>MMX</CPUID>
+ <category>Shift</category>
+ <return type="__m64" varname="dst" etype="FP32"/>
+ <parameter type="__m64" varname="a" etype="UI32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shift packed 32-bit integers in "a" left by "imm8" while shifting in zeros, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*32
+ IF imm8[7:0] &gt; 31
+ dst[i+31:i] := 0
+ ELSE
+ dst[i+31:i] := ZeroExtend32(a[i+31:i] &lt;&lt; imm8[7:0])
+ FI
+ENDFOR
+ </operation>
+ <instruction name="PSLLD" form="mm, imm8" xed="PSLLD_MMXq_IMMb"/>
+ <header>mmintrin.h</header>
+</intrinsic>
+<intrinsic tech="MMX" name="_mm_sll_si64">
+ <type>Integer</type>
+ <CPUID>MMX</CPUID>
+ <category>Shift</category>
+ <return type="__m64" varname="dst" etype="FP32"/>
+ <parameter type="__m64" varname="a" etype="UI64"/>
+ <parameter type="__m64" varname="count" etype="UI64"/>
+ <description>Shift 64-bit integer "a" left by "count" while shifting in zeros, and store the result in "dst".</description>
+ <operation>
+IF count[63:0] &gt; 63
+ dst[63:0] := 0
+ELSE
+ dst[63:0] := ZeroExtend64(a[63:0] &lt;&lt; count[63:0])
+FI
+ </operation>
+ <instruction name="PSLLQ" form="mm, mm" xed="PSLLQ_MMXq_MMXq"/>
+ <header>mmintrin.h</header>
+</intrinsic>
+<intrinsic tech="MMX" name="_mm_slli_si64">
+ <type>Integer</type>
+ <CPUID>MMX</CPUID>
+ <category>Shift</category>
+ <return type="__m64" varname="dst" etype="FP32"/>
+ <parameter type="__m64" varname="a" etype="UI64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shift 64-bit integer "a" left by "imm8" while shifting in zeros, and store the result in "dst".</description>
+ <operation>
+IF imm8[7:0] &gt; 63
+ dst[63:0] := 0
+ELSE
+ dst[63:0] := ZeroExtend64(a[63:0] &lt;&lt; imm8[7:0])
+FI
+ </operation>
+ <instruction name="PSLLQ" form="mm, imm8" xed="PSLLQ_MMXq_IMMb"/>
+ <header>mmintrin.h</header>
+</intrinsic>
+<intrinsic tech="MMX" name="_mm_sra_pi16">
+ <type>Integer</type>
+ <CPUID>MMX</CPUID>
+ <category>Shift</category>
+ <return type="__m64" varname="dst" etype="FP32"/>
+ <parameter type="__m64" varname="a" etype="UI16"/>
+ <parameter type="__m64" varname="count" etype="UI16"/>
+ <description>Shift packed 16-bit integers in "a" right by "count" while shifting in sign bits, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*16
+ IF count[63:0] &gt; 15
+ dst[i+15:i] := (a[i+15] ? 0xFFFF : 0x0)
+ ELSE
+ dst[i+15:i] := SignExtend16(a[i+15:i] &gt;&gt; count[63:0])
+ FI
+ENDFOR
+ </operation>
+ <instruction name="PSRAW" form="mm, mm" xed="PSRAW_MMXq_MMXq"/>
+ <header>mmintrin.h</header>
+</intrinsic>
+<intrinsic tech="MMX" name="_mm_srai_pi16">
+ <type>Integer</type>
+ <CPUID>MMX</CPUID>
+ <category>Shift</category>
+ <return type="__m64" varname="dst" etype="FP32"/>
+ <parameter type="__m64" varname="a" etype="UI16"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shift packed 16-bit integers in "a" right by "imm8" while shifting in sign bits, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*16
+ IF imm8[7:0] &gt; 15
+ dst[i+15:i] := (a[i+15] ? 0xFFFF : 0x0)
+ ELSE
+ dst[i+15:i] := SignExtend16(a[i+15:i] &gt;&gt; imm8[7:0])
+ FI
+ENDFOR
+ </operation>
+ <instruction name="PSRAW" form="mm, imm8" xed="PSRAW_MMXq_IMMb"/>
+ <header>mmintrin.h</header>
+</intrinsic>
+<intrinsic tech="MMX" name="_mm_sra_pi32">
+ <type>Integer</type>
+ <CPUID>MMX</CPUID>
+ <category>Shift</category>
+ <return type="__m64" varname="dst" etype="FP32"/>
+ <parameter type="__m64" varname="a" etype="UI32"/>
+ <parameter type="__m64" varname="count" etype="UI32"/>
+ <description>Shift packed 32-bit integers in "a" right by "count" while shifting in sign bits, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*32
+ IF count[63:0] &gt; 31
+ dst[i+31:i] := (a[i+31] ? 0xFFFFFFFF : 0x0)
+ ELSE
+ dst[i+31:i] := SignExtend32(a[i+31:i] &gt;&gt; count[63:0])
+ FI
+ENDFOR
+ </operation>
+ <instruction name="PSRAD" form="mm, mm" xed="PSRAD_MMXq_MMXq"/>
+ <header>mmintrin.h</header>
+</intrinsic>
+<intrinsic tech="MMX" name="_mm_srai_pi32">
+ <type>Integer</type>
+ <CPUID>MMX</CPUID>
+ <category>Shift</category>
+ <return type="__m64" varname="dst" etype="FP32"/>
+ <parameter type="__m64" varname="a" etype="UI32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shift packed 32-bit integers in "a" right by "imm8" while shifting in sign bits, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*32
+ IF imm8[7:0] &gt; 31
+ dst[i+31:i] := (a[i+31] ? 0xFFFFFFFF : 0x0)
+ ELSE
+ dst[i+31:i] := SignExtend32(a[i+31:i] &gt;&gt; imm8[7:0])
+ FI
+ENDFOR
+ </operation>
+ <instruction name="PSRAD" form="mm, imm8" xed="PSRAD_MMXq_IMMb"/>
+ <header>mmintrin.h</header>
+</intrinsic>
+<intrinsic tech="MMX" name="_mm_srl_pi16">
+ <type>Integer</type>
+ <CPUID>MMX</CPUID>
+ <category>Shift</category>
+ <return type="__m64" varname="dst" etype="FP32"/>
+ <parameter type="__m64" varname="a" etype="UI16"/>
+ <parameter type="__m64" varname="count" etype="UI16"/>
+ <description>Shift packed 16-bit integers in "a" right by "count" while shifting in zeros, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*16
+ IF count[63:0] &gt; 15
+ dst[i+15:i] := 0
+ ELSE
+ dst[i+15:i] := ZeroExtend16(a[i+15:i] &gt;&gt; count[63:0])
+ FI
+ENDFOR
+ </operation>
+ <instruction name="PSRLW" form="mm, mm" xed="PSRLW_MMXq_MMXq"/>
+ <header>mmintrin.h</header>
+</intrinsic>
+<intrinsic tech="MMX" name="_mm_srli_pi16">
+ <type>Integer</type>
+ <CPUID>MMX</CPUID>
+ <category>Shift</category>
+ <return type="__m64" varname="dst" etype="FP32"/>
+ <parameter type="__m64" varname="a" etype="UI16"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shift packed 16-bit integers in "a" right by "imm8" while shifting in zeros, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*16
+ IF imm8[7:0] &gt; 15
+ dst[i+15:i] := 0
+ ELSE
+ dst[i+15:i] := ZeroExtend16(a[i+15:i] &gt;&gt; imm8[7:0])
+ FI
+ENDFOR
+ </operation>
+ <instruction name="PSRLW" form="mm, imm8" xed="PSRLW_MMXq_IMMb"/>
+ <header>mmintrin.h</header>
+</intrinsic>
+<intrinsic tech="MMX" name="_mm_srl_pi32">
+ <type>Integer</type>
+ <CPUID>MMX</CPUID>
+ <category>Shift</category>
+ <return type="__m64" varname="dst" etype="FP32"/>
+ <parameter type="__m64" varname="a" etype="UI32"/>
+ <parameter type="__m64" varname="count" etype="UI32"/>
+ <description>Shift packed 32-bit integers in "a" right by "count" while shifting in zeros, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*32
+ IF count[63:0] &gt; 31
+ dst[i+31:i] := 0
+ ELSE
+ dst[i+31:i] := ZeroExtend32(a[i+31:i] &gt;&gt; count[63:0])
+ FI
+ENDFOR
+ </operation>
+ <instruction name="PSRLD" form="mm, mm" xed="PSRLD_MMXq_MMXq"/>
+ <header>mmintrin.h</header>
+</intrinsic>
+<intrinsic tech="MMX" name="_mm_srli_pi32">
+ <type>Integer</type>
+ <CPUID>MMX</CPUID>
+ <category>Shift</category>
+ <return type="__m64" varname="dst" etype="FP32"/>
+ <parameter type="__m64" varname="a" etype="UI32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shift packed 32-bit integers in "a" right by "imm8" while shifting in zeros, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*32
+ IF imm8[7:0] &gt; 31
+ dst[i+31:i] := 0
+ ELSE
+ dst[i+31:i] := ZeroExtend32(a[i+31:i] &gt;&gt; imm8[7:0])
+ FI
+ENDFOR
+ </operation>
+ <instruction name="PSRLD" form="mm, imm8" xed="PSRLD_MMXq_IMMb"/>
+ <header>mmintrin.h</header>
+</intrinsic>
+<intrinsic tech="MMX" name="_mm_srl_si64">
+ <type>Integer</type>
+ <CPUID>MMX</CPUID>
+ <category>Shift</category>
+ <return type="__m64" varname="dst" etype="FP32"/>
+ <parameter type="__m64" varname="a" etype="UI64"/>
+ <parameter type="__m64" varname="count" etype="UI64"/>
+ <description>Shift 64-bit integer "a" right by "count" while shifting in zeros, and store the result in "dst".</description>
+ <operation>
+IF count[63:0] &gt; 63
+ dst[63:0] := 0
+ELSE
+ dst[63:0] := ZeroExtend64(a[63:0] &gt;&gt; count[63:0])
+FI
+ </operation>
+ <instruction name="PSRLQ" form="mm, mm" xed="PSRLQ_MMXq_MMXq"/>
+ <header>mmintrin.h</header>
+</intrinsic>
+<intrinsic tech="MMX" name="_mm_srli_si64">
+ <type>Integer</type>
+ <CPUID>MMX</CPUID>
+ <category>Shift</category>
+ <return type="__m64" varname="dst" etype="FP32"/>
+ <parameter type="__m64" varname="a" etype="UI64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shift 64-bit integer "a" right by "imm8" while shifting in zeros, and store the result in "dst".</description>
+ <operation>
+IF imm8[7:0] &gt; 63
+ dst[63:0] := 0
+ELSE
+ dst[63:0] := ZeroExtend64(a[63:0] &gt;&gt; imm8[7:0])
+FI
+ </operation>
+ <instruction name="PSRLQ" form="mm, imm8" xed="PSRLQ_MMXq_IMMb"/>
+ <header>mmintrin.h</header>
+</intrinsic>
+<intrinsic tech="MMX" name="_mm_and_si64">
+ <type>Integer</type>
+ <CPUID>MMX</CPUID>
+ <category>Logical</category>
+ <return type="__m64" varname="dst" etype="FP32"/>
+ <parameter type="__m64" varname="a" etype="UI64"/>
+ <parameter type="__m64" varname="b" etype="UI64"/>
+ <description>Compute the bitwise AND of 64 bits (representing integer data) in "a" and "b", and store the result in "dst".</description>
+ <operation>
+dst[63:0] := (a[63:0] AND b[63:0])
+ </operation>
+ <instruction name="PAND" form="mm, mm" xed="PAND_MMXq_MMXq"/>
+ <header>mmintrin.h</header>
+</intrinsic>
+<intrinsic tech="MMX" name="_mm_andnot_si64">
+ <type>Integer</type>
+ <CPUID>MMX</CPUID>
+ <category>Logical</category>
+ <return type="__m64" varname="dst" etype="FP32"/>
+ <parameter type="__m64" varname="a" etype="UI64"/>
+ <parameter type="__m64" varname="b" etype="UI64"/>
+ <description>Compute the bitwise NOT of 64 bits (representing integer data) in "a" and then AND with "b", and store the result in "dst".</description>
+ <operation>
+dst[63:0] := ((NOT a[63:0]) AND b[63:0])
+ </operation>
+ <instruction name="PANDN" form="mm, mm" xed="PANDN_MMXq_MMXq"/>
+ <header>mmintrin.h</header>
+</intrinsic>
+<intrinsic tech="MMX" name="_mm_or_si64">
+ <type>Integer</type>
+ <CPUID>MMX</CPUID>
+ <category>Logical</category>
+ <return type="__m64" varname="dst" etype="FP32"/>
+ <parameter type="__m64" varname="a" etype="UI64"/>
+ <parameter type="__m64" varname="b" etype="UI64"/>
+ <description>Compute the bitwise OR of 64 bits (representing integer data) in "a" and "b", and store the result in "dst".</description>
+ <operation>
+dst[63:0] := (a[63:0] OR b[63:0])
+ </operation>
+ <instruction name="POR" form="mm, mm" xed="POR_MMXq_MMXq"/>
+ <header>mmintrin.h</header>
+</intrinsic>
+<intrinsic tech="MMX" name="_mm_xor_si64">
+ <type>Integer</type>
+ <CPUID>MMX</CPUID>
+ <category>Logical</category>
+ <return type="__m64" varname="dst" etype="FP32"/>
+ <parameter type="__m64" varname="a" etype="UI64"/>
+ <parameter type="__m64" varname="b" etype="UI64"/>
+ <description>Compute the bitwise XOR of 64 bits (representing integer data) in "a" and "b", and store the result in "dst".</description>
+ <operation>
+dst[63:0] := (a[63:0] XOR b[63:0])
+ </operation>
+ <instruction name="PXOR" form="mm, mm" xed="PXOR_MMXq_MMXq"/>
+ <header>mmintrin.h</header>
+</intrinsic>
+<intrinsic tech="MMX" name="_mm_cmpeq_pi8">
+ <type>Integer</type>
+ <CPUID>MMX</CPUID>
+ <category>Compare</category>
+ <return type="__m64" varname="dst" etype="FP32"/>
+ <parameter type="__m64" varname="a" etype="UI8"/>
+ <parameter type="__m64" varname="b" etype="UI8"/>
+ <description>Compare packed 8-bit integers in "a" and "b" for equality, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*8
+ dst[i+7:i] := ( a[i+7:i] == b[i+7:i] ) ? 0xFF : 0
+ENDFOR
+ </operation>
+ <instruction name="PCMPEQB" form="mm, mm" xed="PCMPEQB_MMXq_MMXq"/>
+ <header>mmintrin.h</header>
+</intrinsic>
+<intrinsic tech="MMX" name="_mm_cmpeq_pi16">
+ <type>Integer</type>
+ <CPUID>MMX</CPUID>
+ <category>Compare</category>
+ <return type="__m64" varname="dst" etype="FP32"/>
+ <parameter type="__m64" varname="a" etype="UI16"/>
+ <parameter type="__m64" varname="b" etype="UI16"/>
+ <description>Compare packed 16-bit integers in "a" and "b" for equality, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*16
+ dst[i+15:i] := ( a[i+15:i] == b[i+15:i] ) ? 0xFFFF : 0
+ENDFOR
+ </operation>
+ <instruction name="PCMPEQW" form="mm, mm" xed="PCMPEQW_MMXq_MMXq"/>
+ <header>mmintrin.h</header>
+</intrinsic>
+<intrinsic tech="MMX" name="_mm_cmpeq_pi32">
+ <type>Integer</type>
+ <CPUID>MMX</CPUID>
+ <category>Compare</category>
+ <return type="__m64" varname="dst" etype="FP32"/>
+ <parameter type="__m64" varname="a" etype="UI32"/>
+ <parameter type="__m64" varname="b" etype="UI32"/>
+ <description>Compare packed 32-bit integers in "a" and "b" for equality, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*32
+ dst[i+31:i] := ( a[i+31:i] == b[i+31:i] ) ? 0xFFFFFFFF : 0
+ENDFOR
+ </operation>
+ <instruction name="PCMPEQD" form="mm, mm" xed="PCMPEQD_MMXq_MMXq"/>
+ <header>mmintrin.h</header>
+</intrinsic>
+<intrinsic tech="MMX" name="_mm_cmpgt_pi8">
+ <type>Integer</type>
+ <CPUID>MMX</CPUID>
+ <category>Compare</category>
+ <return type="__m64" varname="dst" etype="FP32"/>
+ <parameter type="__m64" varname="a" etype="SI8"/>
+ <parameter type="__m64" varname="b" etype="SI8"/>
+ <description>Compare packed signed 8-bit integers in "a" and "b" for greater-than, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*8
+ dst[i+7:i] := ( a[i+7:i] &gt; b[i+7:i] ) ? 0xFF : 0
+ENDFOR
+ </operation>
+ <instruction name="PCMPGTB" form="mm, mm" xed="PCMPGTB_MMXq_MMXq"/>
+ <header>mmintrin.h</header>
+</intrinsic>
+<intrinsic tech="MMX" name="_mm_cmpgt_pi16">
+ <type>Integer</type>
+ <CPUID>MMX</CPUID>
+ <category>Compare</category>
+ <return type="__m64" varname="dst" etype="FP32"/>
+ <parameter type="__m64" varname="a" etype="SI16"/>
+ <parameter type="__m64" varname="b" etype="SI16"/>
+ <description>Compare packed signed 16-bit integers in "a" and "b" for greater-than, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*16
+ dst[i+15:i] := ( a[i+15:i] &gt; b[i+15:i] ) ? 0xFFFF : 0
+ENDFOR
+ </operation>
+ <instruction name="PCMPGTW" form="mm, mm" xed="PCMPGTW_MMXq_MMXq"/>
+ <header>mmintrin.h</header>
+</intrinsic>
+<intrinsic tech="MMX" name="_mm_cmpgt_pi32">
+ <type>Integer</type>
+ <CPUID>MMX</CPUID>
+ <category>Compare</category>
+ <return type="__m64" varname="dst" etype="FP32"/>
+ <parameter type="__m64" varname="a" etype="SI32"/>
+ <parameter type="__m64" varname="b" etype="SI32"/>
+ <description>Compare packed signed 32-bit integers in "a" and "b" for greater-than, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*32
+ dst[i+31:i] := ( a[i+31:i] &gt; b[i+31:i] ) ? 0xFFFFFFFF : 0
+ENDFOR
+ </operation>
+ <instruction name="PCMPGTD" form="mm, mm" xed="PCMPGTD_MMXq_MMXq"/>
+ <header>mmintrin.h</header>
+</intrinsic>
+<intrinsic tech="MMX" name="_mm_cvtsi32_si64">
+ <type>Integer</type>
+ <CPUID>MMX</CPUID>
+ <category>Convert</category>
+ <return type="__m64" varname="dst" etype="FP32"/>
+ <parameter type="int" varname="a" etype="UI32"/>
+ <description>Copy 32-bit integer "a" to the lower element of "dst", and zero the upper element of "dst".</description>
+ <operation>
+dst[31:0] := a[31:0]
+dst[63:32] := 0
+ </operation>
+ <instruction name="MOVD" form="mm, r32" xed="MOVD_MMXq_GPR32"/>
+ <header>mmintrin.h</header>
+</intrinsic>
+<intrinsic tech="MMX" name="_mm_cvtsi64_si32">
+ <type>Integer</type>
+ <CPUID>MMX</CPUID>
+ <category>Convert</category>
+ <return type="int" varname="dst" etype="UI32"/>
+ <parameter type="__m64" varname="a" etype="FP32"/>
+ <description>Copy the lower 32-bit integer in "a" to "dst".</description>
+ <operation>
+dst[31:0] := a[31:0]
+ </operation>
+ <instruction name="MOVD" form="r32, mm" xed="MOVD_GPR32_MMXd"/>
+ <header>mmintrin.h</header>
+</intrinsic>
+<intrinsic tech="MMX" name="_mm_cvtm64_si64">
+ <type>Integer</type>
+ <CPUID>MMX</CPUID>
+ <category>Convert</category>
+ <return type="__int64" varname="dst" etype="UI64"/>
+ <parameter type="__m64" varname="a" etype="FP32"/>
+ <description>Copy 64-bit integer "a" to "dst".</description>
+ <operation>
+dst[63:0] := a[63:0]
+ </operation>
+ <instruction name="MOVQ" form="r64, mm" xed="MOVQ_GPR64_MMXq"/>
+ <header>mmintrin.h</header>
+</intrinsic>
+<intrinsic tech="MMX" name="_mm_cvtsi64_m64">
+ <type>Integer</type>
+ <CPUID>MMX</CPUID>
+ <category>Convert</category>
+ <return type="__m64" varname="dst" etype="FP32"/>
+ <parameter type="__int64" varname="a" etype="UI64"/>
+ <description>Copy 64-bit integer "a" to "dst".</description>
+ <operation>
+dst[63:0] := a[63:0]
+ </operation>
+ <instruction name="MOVQ" form="mm, r64" xed="MOVQ_MMXq_GPR64"/>
+ <header>mmintrin.h</header>
+</intrinsic>
+<intrinsic tech="MMX" name="_mm_setzero_si64">
+ <type>Integer</type>
+ <CPUID>MMX</CPUID>
+ <category>Set</category>
+ <return type="__m64" varname="dst" etype="FP32"/>
+ <parameter type="void"/>
+ <description>Return vector of type __m64 with all elements set to zero.</description>
+ <operation>
+dst[MAX:0] := 0
+ </operation>
+ <instruction name="PXOR" form="mm, mm" xed="PXOR_MMXq_MMXq"/>
+ <header>mmintrin.h</header>
+</intrinsic>
+<intrinsic tech="MMX" sequence="TRUE" name="_mm_set_pi32">
+ <type>Integer</type>
+ <CPUID>MMX</CPUID>
+ <category>Set</category>
+ <return type="__m64" varname="dst" etype="FP32"/>
+ <parameter type="int" varname="e1" etype="UI32"/>
+ <parameter type="int" varname="e0" etype="UI32"/>
+ <description>Set packed 32-bit integers in "dst" with the supplied values.</description>
+ <operation>
+dst[31:0] := e0
+dst[63:32] := e1
+ </operation>
+ <header>mmintrin.h</header>
+</intrinsic>
+<intrinsic tech="MMX" sequence="TRUE" name="_mm_set_pi16">
+ <type>Integer</type>
+ <CPUID>MMX</CPUID>
+ <category>Set</category>
+ <return type="__m64" varname="dst" etype="FP32"/>
+ <parameter type="short" varname="e3" etype="UI16"/>
+ <parameter type="short" varname="e2" etype="UI16"/>
+ <parameter type="short" varname="e1" etype="UI16"/>
+ <parameter type="short" varname="e0" etype="UI16"/>
+ <description>Set packed 16-bit integers in "dst" with the supplied values.</description>
+ <operation>
+dst[15:0] := e0
+dst[31:16] := e1
+dst[47:32] := e2
+dst[63:48] := e3
+ </operation>
+ <header>mmintrin.h</header>
+</intrinsic>
+<intrinsic tech="MMX" sequence="TRUE" name="_mm_set_pi8">
+ <type>Integer</type>
+ <CPUID>MMX</CPUID>
+ <category>Set</category>
+ <return type="__m64" varname="dst" etype="FP32"/>
+ <parameter type="char" varname="e7" etype="UI8"/>
+ <parameter type="char" varname="e6" etype="UI8"/>
+ <parameter type="char" varname="e5" etype="UI8"/>
+ <parameter type="char" varname="e4" etype="UI8"/>
+ <parameter type="char" varname="e3" etype="UI8"/>
+ <parameter type="char" varname="e2" etype="UI8"/>
+ <parameter type="char" varname="e1" etype="UI8"/>
+ <parameter type="char" varname="e0" etype="UI8"/>
+ <description>Set packed 8-bit integers in "dst" with the supplied values.</description>
+ <operation>
+dst[7:0] := e0
+dst[15:8] := e1
+dst[23:16] := e2
+dst[31:24] := e3
+dst[39:32] := e4
+dst[47:40] := e5
+dst[55:48] := e6
+dst[63:56] := e7
+ </operation>
+ <header>mmintrin.h</header>
+</intrinsic>
+<intrinsic tech="MMX" sequence="TRUE" name="_mm_set1_pi32">
+ <type>Integer</type>
+ <CPUID>MMX</CPUID>
+ <category>Set</category>
+ <return type="__m64" varname="dst" etype="FP32"/>
+ <parameter type="int" varname="a" etype="UI32"/>
+ <description>Broadcast 32-bit integer "a" to all elements of "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*32
+ dst[i+31:i] := a[31:0]
+ENDFOR
+ </operation>
+ <header>mmintrin.h</header>
+</intrinsic>
+<intrinsic tech="MMX" sequence="TRUE" name="_mm_set1_pi16">
+ <type>Integer</type>
+ <CPUID>MMX</CPUID>
+ <category>Set</category>
+ <return type="__m64" varname="dst" etype="FP32"/>
+ <parameter type="short" varname="a" etype="UI16"/>
+ <description>Broadcast 16-bit integer "a" to all elements of "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*16
+ dst[i+15:i] := a[15:0]
+ENDFOR
+ </operation>
+ <header>mmintrin.h</header>
+</intrinsic>
+<intrinsic tech="MMX" sequence="TRUE" name="_mm_set1_pi8">
+ <type>Integer</type>
+ <CPUID>MMX</CPUID>
+ <category>Set</category>
+ <return type="__m64" varname="dst" etype="FP32"/>
+ <parameter type="char" varname="a" etype="UI8"/>
+ <description>Broadcast 8-bit integer "a" to all elements of "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*8
+ dst[i+7:i] := a[7:0]
+ENDFOR
+ </operation>
+ <header>mmintrin.h</header>
+</intrinsic>
+<intrinsic tech="MMX" sequence="TRUE" name="_mm_setr_pi32">
+ <type>Integer</type>
+ <CPUID>MMX</CPUID>
+ <category>Set</category>
+ <return type="__m64" varname="dst" etype="FP32"/>
+ <parameter type="int" varname="e1" etype="UI32"/>
+ <parameter type="int" varname="e0" etype="UI32"/>
+ <description>Set packed 32-bit integers in "dst" with the supplied values in reverse order.</description>
+ <operation>
+dst[31:0] := e1
+dst[63:32] := e0
+ </operation>
+ <header>mmintrin.h</header>
+</intrinsic>
+<intrinsic tech="MMX" sequence="TRUE" name="_mm_setr_pi16">
+ <type>Integer</type>
+ <CPUID>MMX</CPUID>
+ <category>Set</category>
+ <return type="__m64" varname="dst" etype="FP32"/>
+ <parameter type="short" varname="e3" etype="UI16"/>
+ <parameter type="short" varname="e2" etype="UI16"/>
+ <parameter type="short" varname="e1" etype="UI16"/>
+ <parameter type="short" varname="e0" etype="UI16"/>
+ <description>Set packed 16-bit integers in "dst" with the supplied values in reverse order.</description>
+ <operation>
+dst[15:0] := e3
+dst[31:16] := e2
+dst[47:32] := e1
+dst[63:48] := e0
+ </operation>
+ <header>mmintrin.h</header>
+</intrinsic>
+<intrinsic tech="MMX" sequence="TRUE" name="_mm_setr_pi8">
+ <type>Integer</type>
+ <CPUID>MMX</CPUID>
+ <category>Set</category>
+ <return type="__m64" varname="dst" etype="FP32"/>
+ <parameter type="char" varname="e7" etype="UI8"/>
+ <parameter type="char" varname="e6" etype="UI8"/>
+ <parameter type="char" varname="e5" etype="UI8"/>
+ <parameter type="char" varname="e4" etype="UI8"/>
+ <parameter type="char" varname="e3" etype="UI8"/>
+ <parameter type="char" varname="e2" etype="UI8"/>
+ <parameter type="char" varname="e1" etype="UI8"/>
+ <parameter type="char" varname="e0" etype="UI8"/>
+ <description>Set packed 8-bit integers in "dst" with the supplied values in reverse order.</description>
+ <operation>
+dst[7:0] := e7
+dst[15:8] := e6
+dst[23:16] := e5
+dst[31:24] := e4
+dst[39:32] := e3
+dst[47:40] := e2
+dst[55:48] := e1
+dst[63:56] := e0
+ </operation>
+ <header>mmintrin.h</header>
+</intrinsic>
+<intrinsic tech="MMX" name="_mm_packs_pi16">
+ <type>Integer</type>
+ <CPUID>MMX</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m64" varname="dst" etype="SI8"/>
+ <parameter type="__m64" varname="a" etype="SI16"/>
+ <parameter type="__m64" varname="b" etype="SI16"/>
+ <description>Convert packed signed 16-bit integers from "a" and "b" to packed 8-bit integers using signed saturation, and store the results in "dst".</description>
+ <operation>
+dst[7:0] := Saturate8(a[15:0])
+dst[15:8] := Saturate8(a[31:16])
+dst[23:16] := Saturate8(a[47:32])
+dst[31:24] := Saturate8(a[63:48])
+dst[39:32] := Saturate8(b[15:0])
+dst[47:40] := Saturate8(b[31:16])
+dst[55:48] := Saturate8(b[47:32])
+dst[63:56] := Saturate8(b[63:48])
+ </operation>
+ <instruction name="PACKSSWB" form="mm, mm" xed="PACKSSWB_MMXq_MMXq"/>
+ <header>mmintrin.h</header>
+</intrinsic>
+<intrinsic tech="MMX" name="_mm_packs_pi32">
+ <type>Integer</type>
+ <CPUID>MMX</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m64" varname="dst" etype="SI16"/>
+ <parameter type="__m64" varname="a" etype="SI32"/>
+ <parameter type="__m64" varname="b" etype="SI32"/>
+ <description>Convert packed signed 32-bit integers from "a" and "b" to packed 16-bit integers using signed saturation, and store the results in "dst".</description>
+ <operation>
+dst[15:0] := Saturate16(a[31:0])
+dst[31:16] := Saturate16(a[63:32])
+dst[47:32] := Saturate16(b[31:0])
+dst[63:48] := Saturate16(b[63:32])
+ </operation>
+ <instruction name="PACKSSDW" form="mm, mm" xed="PACKSSDW_MMXq_MMXq"/>
+ <header>mmintrin.h</header>
+</intrinsic>
+<intrinsic tech="MMX" name="_mm_packs_pu16">
+ <type>Integer</type>
+ <CPUID>MMX</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m64" varname="dst" etype="UI8"/>
+ <parameter type="__m64" varname="a" etype="SI16"/>
+ <parameter type="__m64" varname="b" etype="SI16"/>
+ <description>Convert packed signed 16-bit integers from "a" and "b" to packed 8-bit integers using unsigned saturation, and store the results in "dst".</description>
+ <operation>
+dst[7:0] := SaturateU8(a[15:0])
+dst[15:8] := SaturateU8(a[31:16])
+dst[23:16] := SaturateU8(a[47:32])
+dst[31:24] := SaturateU8(a[63:48])
+dst[39:32] := SaturateU8(b[15:0])
+dst[47:40] := SaturateU8(b[31:16])
+dst[55:48] := SaturateU8(b[47:32])
+dst[63:56] := SaturateU8(b[63:48])
+ </operation>
+ <instruction name="PACKUSWB" form="mm, mm" xed="PACKUSWB_MMXq_MMXq"/>
+ <header>mmintrin.h</header>
+</intrinsic>
+<intrinsic tech="MMX" name="_mm_unpackhi_pi8">
+ <type>Integer</type>
+ <CPUID>MMX</CPUID>
+ <category>Swizzle</category>
+ <return type="__m64" varname="dst" etype="FP32"/>
+ <parameter type="__m64" varname="a" etype="UI8"/>
+ <parameter type="__m64" varname="b" etype="UI8"/>
+ <description>Unpack and interleave 8-bit integers from the high half of "a" and "b", and store the results in "dst".</description>
+ <operation>
+DEFINE INTERLEAVE_HIGH_BYTES(src1[63:0], src2[63:0]) {
+ dst[7:0] := src1[39:32]
+ dst[15:8] := src2[39:32]
+ dst[23:16] := src1[47:40]
+ dst[31:24] := src2[47:40]
+ dst[39:32] := src1[55:48]
+ dst[47:40] := src2[55:48]
+ dst[55:48] := src1[63:56]
+ dst[63:56] := src2[63:56]
+ RETURN dst[63:0]
+}
+dst[63:0] := INTERLEAVE_HIGH_BYTES(a[63:0], b[63:0])
+ </operation>
+ <instruction name="PUNPCKHBW" form="mm, mm" xed="PUNPCKHBW_MMXq_MMXd"/>
+ <header>mmintrin.h</header>
+</intrinsic>
+<intrinsic tech="MMX" name="_mm_unpackhi_pi16">
+ <type>Integer</type>
+ <CPUID>MMX</CPUID>
+ <category>Swizzle</category>
+ <return type="__m64" varname="dst" etype="FP32"/>
+ <parameter type="__m64" varname="a" etype="UI16"/>
+ <parameter type="__m64" varname="b" etype="UI16"/>
+ <description>Unpack and interleave 16-bit integers from the high half of "a" and "b", and store the results in "dst".</description>
+ <operation>
+DEFINE INTERLEAVE_HIGH_WORDS(src1[63:0], src2[63:0]) {
+ dst[15:0] := src1[47:32]
+ dst[31:16] := src2[47:32]
+ dst[47:32] := src1[63:48]
+ dst[63:48] := src2[63:48]
+ RETURN dst[63:0]
+}
+dst[63:0] := INTERLEAVE_HIGH_WORDS(a[63:0], b[63:0])
+ </operation>
+ <instruction name="PUNPCKHWD" form="mm, mm" xed="PUNPCKHWD_MMXq_MMXd"/>
+ <header>mmintrin.h</header>
+</intrinsic>
+<intrinsic tech="MMX" name="_mm_unpackhi_pi32">
+ <type>Integer</type>
+ <CPUID>MMX</CPUID>
+ <category>Swizzle</category>
+ <return type="__m64" varname="dst" etype="FP32"/>
+ <parameter type="__m64" varname="a" etype="UI32"/>
+ <parameter type="__m64" varname="b" etype="UI32"/>
+ <description>Unpack and interleave 32-bit integers from the high half of "a" and "b", and store the results in "dst".</description>
+ <operation>
+dst[31:0] := a[63:32]
+dst[63:32] := b[63:32]
+ </operation>
+ <instruction name="PUNPCKHDQ" form="mm, mm" xed="PUNPCKHDQ_MMXq_MMXd"/>
+ <header>mmintrin.h</header>
+</intrinsic>
+<intrinsic tech="MMX" name="_mm_unpacklo_pi8">
+ <type>Integer</type>
+ <CPUID>MMX</CPUID>
+ <category>Swizzle</category>
+ <return type="__m64" varname="dst" etype="FP32"/>
+ <parameter type="__m64" varname="a" etype="UI8"/>
+ <parameter type="__m64" varname="b" etype="UI8"/>
+ <description>Unpack and interleave 8-bit integers from the low half of "a" and "b", and store the results in "dst".</description>
+ <operation>
+DEFINE INTERLEAVE_BYTES(src1[63:0], src2[63:0]) {
+ dst[7:0] := src1[7:0]
+ dst[15:8] := src2[7:0]
+ dst[23:16] := src1[15:8]
+ dst[31:24] := src2[15:8]
+ dst[39:32] := src1[23:16]
+ dst[47:40] := src2[23:16]
+ dst[55:48] := src1[31:24]
+ dst[63:56] := src2[31:24]
+ RETURN dst[63:0]
+}
+dst[63:0] := INTERLEAVE_BYTES(a[63:0], b[63:0])
+ </operation>
+ <instruction name="PUNPCKLBW" form="mm, mm" xed="PUNPCKLBW_MMXq_MMXd"/>
+ <header>mmintrin.h</header>
+</intrinsic>
+<intrinsic tech="MMX" name="_mm_unpacklo_pi16">
+ <type>Integer</type>
+ <CPUID>MMX</CPUID>
+ <category>Swizzle</category>
+ <return type="__m64" varname="dst" etype="FP32"/>
+ <parameter type="__m64" varname="a" etype="UI16"/>
+ <parameter type="__m64" varname="b" etype="UI16"/>
+ <description>Unpack and interleave 16-bit integers from the low half of "a" and "b", and store the results in "dst".</description>
+ <operation>
+DEFINE INTERLEAVE_WORDS(src1[63:0], src2[63:0]) {
+ dst[15:0] := src1[15:0]
+ dst[31:16] := src2[15:0]
+ dst[47:32] := src1[31:16]
+ dst[63:48] := src2[31:16]
+ RETURN dst[63:0]
+}
+dst[63:0] := INTERLEAVE_WORDS(a[63:0], b[63:0])
+ </operation>
+ <instruction name="PUNPCKLWD" form="mm, mm" xed="PUNPCKLWD_MMXq_MMXd"/>
+ <header>mmintrin.h</header>
+</intrinsic>
+<intrinsic tech="MMX" name="_mm_unpacklo_pi32">
+ <type>Integer</type>
+ <CPUID>MMX</CPUID>
+ <category>Swizzle</category>
+ <return type="__m64" varname="dst" etype="FP32"/>
+ <parameter type="__m64" varname="a" etype="UI32"/>
+ <parameter type="__m64" varname="b" etype="UI32"/>
+ <description>Unpack and interleave 32-bit integers from the low half of "a" and "b", and store the results in "dst".</description>
+ <operation>
+dst[31:0] := a[31:0]
+dst[63:32] := b[31:0]
+ </operation>
+ <instruction name="PUNPCKLDQ" form="mm, mm" xed="PUNPCKLDQ_MMXq_MMXd"/>
+ <header>mmintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_mm_monitor">
+ <CPUID>MONITOR</CPUID>
+ <category>General Support</category>
+ <return type="void"/>
+ <parameter type="void const*" varname="p"/>
+ <parameter type="unsigned" varname="extensions" etype="UI32"/>
+ <parameter type="unsigned" varname="hints" etype="UI32"/>
+ <description>Arm address monitoring hardware using the address specified in "p". A store to an address within the specified address range triggers the monitoring hardware. Specify optional extensions in "extensions", and optional hints in "hints".</description>
+ <instruction name="MONITOR" xed="MONITOR"/>
+ <header>pmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_mm_mwait">
+ <CPUID>MONITOR</CPUID>
+ <category>General Support</category>
+ <return type="void"/>
+ <parameter type="unsigned" varname="extensions" etype="UI32"/>
+ <parameter type="unsigned" varname="hints" etype="UI32"/>
+ <description>Hint to the processor that it can enter an implementation-dependent-optimized state while waiting for an event or store operation to the address range specified by MONITOR.</description>
+ <instruction name="MWAIT" xed="MWAIT"/>
+ <header>pmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_loadbe_i16">
+ <CPUID>MOVBE</CPUID>
+ <category>Load</category>
+ <return type="short" varname="dst" etype="UI16"/>
+ <parameter type="void const *" varname="ptr" etype="UI16" memwidth="16"/>
+ <description>Load 16 bits from memory, perform a byte swap operation, and store the result in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*8
+ dst[i+7:i] := MEM[ptr+15-i:ptr+8-i]
+ENDFOR
+ </operation>
+ <instruction name="MOVBE" form="r16, m16" xed="MOVBE_GPRv_MEMv"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_loadbe_i32">
+ <CPUID>MOVBE</CPUID>
+ <category>Load</category>
+ <return type="int" varname="dst" etype="UI32"/>
+ <parameter type="void const *" varname="ptr" etype="UI32" memwidth="32"/>
+ <description>Load 32 bits from memory, perform a byte swap operation, and store the result in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*8
+ dst[i+7:i] := MEM[ptr+31-i:ptr+24-i]
+ENDFOR
+ </operation>
+ <instruction name="MOVBE" form="r32, m32" xed="MOVBE_GPRv_MEMv"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_loadbe_i64">
+ <CPUID>MOVBE</CPUID>
+ <category>Load</category>
+ <return type="__int64" varname="dst" etype="UI64"/>
+ <parameter type="void const *" varname="ptr" etype="UI64" memwidth="64"/>
+ <description>Load 64 bits from memory, perform a byte swap operation, and store the result in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*8
+ dst[i+7:i] := MEM[ptr+63-i:ptr+56-i]
+ENDFOR
+ </operation>
+ <instruction name="MOVBE" form="r64, m64" xed="MOVBE_GPRv_MEMv"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_storebe_i16">
+ <CPUID>MOVBE</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void *" varname="ptr" etype="UI16" memwidth="16"/>
+ <parameter type="short" varname="data" etype="UI16"/>
+ <description>Perform a byte swap operation of the 16 bits in "data", and store the results to memory.</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*8
+ MEM[ptr+i+7:ptr+i] := data[15-i:8-i]
+ENDFOR
+ </operation>
+ <instruction name="MOVBE" form="m16, r16" xed="MOVBE_MEMv_GPRv"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_storebe_i32">
+ <CPUID>MOVBE</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void *" varname="ptr" etype="UI32" memwidth="32"/>
+ <parameter type="int" varname="data" etype="UI32"/>
+ <description>Perform a byte swap operation of the 32 bits in "data", and store the results to memory.</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*8
+ MEM[ptr+i+7:ptr+i] := data[31-i:24-i]
+ENDFOR
+ </operation>
+ <instruction name="MOVBE" form="m32, r32" xed="MOVBE_MEMv_GPRv"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_storebe_i64">
+ <CPUID>MOVBE</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void *" varname="ptr" etype="UI64" memwidth="64"/>
+ <parameter type="__int64" varname="data" etype="UI64"/>
+ <description>Perform a byte swap operation of the 64 bits in "data", and store the results to memory.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*8
+ MEM[ptr+i+7:ptr+i] := data[63-i:56-i]
+ENDFOR
+ </operation>
+ <instruction name="MOVBE" form="m64, r64" xed="MOVBE_MEMv_GPRv"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_movdir64b">
+ <CPUID>MOVDIR64B</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="dst" etype="M512" memwidth="512"/>
+ <parameter type="const void*" varname="src" etype="M512" memwidth="512"/>
+ <description>Move 64-byte (512-bit) value using direct store from source memory address "src" to destination memory address "dst".</description>
+ <operation>
+MEM[dst+511:dst] := MEM[src+511:src]
+ </operation>
+ <instruction name="MOVDIR64B" form="r64, m512" xed="MOVDIR64B_GPRa_MEM"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_directstoreu_u64">
+ <CPUID>MOVDIRI</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="dst" etype="UI64" memwidth="64"/>
+ <parameter type="unsigned __int64" varname="val" etype="UI64"/>
+ <description>Store 64-bit integer from "val" into memory using direct store.</description>
+ <operation>
+MEM[dst+63:dst] := val[63:0]
+ </operation>
+ <instruction name="MOVDIRI" form="m64, r64" xed="MOVDIRI_MEMu64_GPR64u64"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_directstoreu_u32">
+ <CPUID>MOVDIRI</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="dst" etype="UI32" memwidth="32"/>
+ <parameter type="unsigned int" varname="val" etype="UI32"/>
+ <description>Store 32-bit integer from "val" into memory using direct store.</description>
+ <operation>
+MEM[dst+31:dst] := val[31:0]
+ </operation>
+ <instruction name="MOVDIRI" form="m32, r32" xed="MOVDIRI_MEMu32_GPR32u32"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_bnd_set_ptr_bounds">
+ <CPUID>MPX</CPUID>
+ <category>Miscellaneous</category>
+ <return type="void *"/>
+ <parameter type="const void *" varname="srcmem"/>
+ <parameter type="size_t" varname="size" etype="UI64"/>
+ <description>Make a pointer with the value of "srcmem" and bounds set to ["srcmem", "srcmem" + "size" - 1], and store the result in "dst".</description>
+ <operation>dst := srcmem
+dst.LB := srcmem
+dst.UB := srcmem + size - 1
+ </operation>
+ <instruction name="BNDMK" form="bnd, m32" xed="BNDMK_BND_AGEN"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" sequence="TRUE" name="_bnd_narrow_ptr_bounds">
+ <CPUID>MPX</CPUID>
+ <category>Miscellaneous</category>
+ <return type="void *"/>
+ <parameter type="const void *" varname="q"/>
+ <parameter type="const void *" varname="r"/>
+ <parameter type="size_t" varname="size" etype="UI64"/>
+ <description>Narrow the bounds for pointer "q" to the intersection of the bounds of "r" and the bounds ["q", "q" + "size" - 1], and store the result in "dst".</description>
+ <operation>dst := q
+IF r.LB &gt; (q + size - 1) OR r.UB &lt; q
+ dst.LB := 1
+ dst.UB := 0
+ELSE
+ dst.LB := MAX(r.LB, q)
+ dst.UB := MIN(r.UB, (q + size - 1))
+FI
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" sequence="TRUE" name="_bnd_copy_ptr_bounds">
+ <CPUID>MPX</CPUID>
+ <category>Miscellaneous</category>
+ <return type="void *"/>
+ <parameter type="const void *" varname="q"/>
+ <parameter type="const void *" varname="r"/>
+ <description>Make a pointer with the value of "q" and bounds set to the bounds of "r" (e.g. copy the bounds of "r" to pointer "q"), and store the result in "dst".</description>
+ <operation>dst := q
+dst.LB := r.LB
+dst.UB := r.UB
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" sequence="TRUE" name="_bnd_init_ptr_bounds">
+ <CPUID>MPX</CPUID>
+ <category>Miscellaneous</category>
+ <return type="void *"/>
+ <parameter type="const void *" varname="q"/>
+ <description>Make a pointer with the value of "q" and open bounds, which allow the pointer to access the entire virtual address space, and store the result in "dst".</description>
+ <operation>dst := q
+dst.LB := 0
+dst.UB := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_bnd_store_ptr_bounds">
+ <CPUID>MPX</CPUID>
+ <category>Miscellaneous</category>
+ <return type="void"/>
+ <parameter type="const void **" varname="ptr_addr"/>
+ <parameter type="const void *" varname="ptr_val"/>
+ <description>Stores the bounds of "ptr_val" pointer in memory at address "ptr_addr".</description>
+ <operation>MEM[ptr_addr].LB := ptr_val.LB
+MEM[ptr_addr].UB := ptr_val.UB
+ </operation>
+ <instruction name="BNDSTX" form="mib, bnd" xed="BNDSTX_MEMbnd64_BND"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_bnd_chk_ptr_lbounds">
+ <CPUID>MPX</CPUID>
+ <category>Miscellaneous</category>
+ <return type="void"/>
+ <parameter type="const void *" varname="q"/>
+ <description>Checks if "q" is within its lower bound, and throws a #BR if not.</description>
+ <operation>IF q &lt; q.LB
+ #BR
+FI
+ </operation>
+ <instruction name="BNDCL" form="bnd, m64" xed="BNDCL_BND_AGEN"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_bnd_chk_ptr_ubounds">
+ <CPUID>MPX</CPUID>
+ <category>Miscellaneous</category>
+ <return type="void"/>
+ <parameter type="const void *" varname="q"/>
+ <description>Checks if "q" is within its upper bound, and throws a #BR if not.</description>
+ <operation>IF q &gt; q.UB
+ #BR
+FI
+ </operation>
+ <instruction name="BNDCU" form="bnd, m64" xed="BNDCU_BND_AGEN"/>
+ <instruction name="BNDCN" form="bnd, m64" xed="BNDCN_BND_AGEN"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_bnd_chk_ptr_bounds">
+ <CPUID>MPX</CPUID>
+ <category>Miscellaneous</category>
+ <return type="void"/>
+ <parameter type="const void *" varname="q"/>
+ <parameter type="size_t" varname="size" etype="UI64"/>
+ <description>Checks if ["q", "q" + "size" - 1] is within the lower and upper bounds of "q" and throws a #BR if not.</description>
+ <operation>IF (q + size - 1) &lt; q.LB OR (q + size - 1) &gt; q.UB
+ #BR
+FI
+ </operation>
+ <instruction name="BNDCU" form="bnd, m32" xed="BNDCU_BND_AGEN"/>
+ <instruction name="BNDCN" form="bnd, m32" xed="BNDCN_BND_AGEN"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" sequence="TRUE" name="_bnd_get_ptr_lbound">
+ <CPUID>MPX</CPUID>
+ <category>Miscellaneous</category>
+ <return type="const void *"/>
+ <parameter type="const void *" varname="q"/>
+ <description>Return the lower bound of "q".</description>
+ <operation>dst := q.LB
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" sequence="TRUE" name="_bnd_get_ptr_ubound">
+ <CPUID>MPX</CPUID>
+ <category>Miscellaneous</category>
+ <return type="const void *"/>
+ <parameter type="const void *" varname="q"/>
+ <description>Return the upper bound of "q".</description>
+ <operation>dst := q.UB
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_bit_scan_forward">
+ <type>Integer</type>
+ <category>Bit Manipulation</category>
+ <return type="int" varname="dst" etype="UI32"/>
+ <parameter type="int" varname="a" etype="UI32"/>
+ <description>Set "dst" to the index of the lowest set bit in 32-bit integer "a". If no bits are set in "a" then "dst" is undefined.</description>
+ <operation>
+tmp := 0
+IF a == 0
+ // dst is undefined
+ELSE
+ DO WHILE ((tmp &lt; 32) AND a[tmp] == 0)
+ tmp := tmp + 1
+ OD
+FI
+dst := tmp
+ </operation>
+ <instruction name="BSF" form="r32, r32" xed="BSF_GPRv_GPRv"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_bit_scan_reverse">
+ <type>Integer</type>
+ <category>Bit Manipulation</category>
+ <return type="int" varname="dst" etype="UI32"/>
+ <parameter type="int" varname="a" etype="UI32"/>
+ <description>Set "dst" to the index of the highest set bit in 32-bit integer "a". If no bits are set in "a" then "dst" is undefined.</description>
+ <operation>
+tmp := 31
+IF a == 0
+ // dst is undefined
+ELSE
+ DO WHILE ((tmp &gt; 0) AND a[tmp] == 0)
+ tmp := tmp - 1
+ OD
+FI
+dst := tmp
+ </operation>
+ <instruction name="BSR" form="r32, r32" xed="BSR_GPRv_GPRv"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_BitScanForward">
+ <type>Integer</type>
+ <type>Flag</type>
+ <category>Bit Manipulation</category>
+ <return type="unsigned char" varname="dst" etype="UI8"/>
+ <parameter type="unsigned __int32*" varname="index" etype="UI32" memwidth="32"/>
+ <parameter type="unsigned __int32" varname="a" etype="UI32"/>
+ <description>Set "index" to the index of the lowest set bit in 32-bit integer "a". If no bits are set in "a", then "index" is undefined and "dst" is set to 0, otherwise "dst" is set to 1.</description>
+ <operation>
+tmp := 0
+IF a == 0
+ // MEM[index+31:index] is undefined
+ dst := 0
+ELSE
+ DO WHILE ((tmp &lt; 32) AND a[tmp] == 0)
+ tmp := tmp + 1
+ OD
+ MEM[index+31:index] := tmp
+ dst := 1
+FI
+ </operation>
+ <instruction name="BSF" form="r32, r32" xed="BSF_GPRv_GPRv"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_BitScanReverse">
+ <type>Integer</type>
+ <type>Flag</type>
+ <category>Bit Manipulation</category>
+ <return type="unsigned char" varname="dst" etype="UI8"/>
+ <parameter type="unsigned __int32*" varname="index" etype="UI32" memwidth="32"/>
+ <parameter type="unsigned __int32" varname="a" etype="UI32"/>
+ <description>Set "index" to the index of the highest set bit in 32-bit integer "a". If no bits are set in "a", then "index" is undefined and "dst" is set to 0, otherwise "dst" is set to 1.</description>
+ <operation>
+tmp := 31
+IF a == 0
+ // MEM[index+31:index] is undefined
+ dst := 0
+ELSE
+ DO WHILE ((tmp &gt; 0) AND a[tmp] == 0)
+ tmp := tmp - 1
+ OD
+ MEM[index+31:index] := tmp
+ dst := 1
+FI
+ </operation>
+ <instruction name="BSR" form="r32, r32" xed="BSR_GPRv_GPRv"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_BitScanForward64">
+ <type>Integer</type>
+ <type>Flag</type>
+ <category>Bit Manipulation</category>
+ <return type="unsigned char" varname="dst" etype="UI8"/>
+ <parameter type="unsigned __int32*" varname="index" etype="UI32" memwidth="32"/>
+ <parameter type="unsigned __int64" varname="a" etype="UI64"/>
+ <description>Set "index" to the index of the lowest set bit in 64-bit integer "a". If no bits are set in "a", then "index" is undefined and "dst" is set to 0, otherwise "dst" is set to 1.</description>
+ <operation>
+tmp := 0
+IF a == 0
+ // MEM[index+31:index] is undefined
+ dst := 0
+ELSE
+ DO WHILE ((tmp &lt; 64) AND a[tmp] == 0)
+ tmp := tmp + 1
+ OD
+ MEM[index+31:index] := tmp
+ dst := 1
+FI
+ </operation>
+ <instruction name="BSF" form="r64, r64" xed="BSF_GPRv_GPRv"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_BitScanReverse64">
+ <type>Integer</type>
+ <type>Flag</type>
+ <category>Bit Manipulation</category>
+ <return type="unsigned char" varname="dst" etype="UI8"/>
+ <parameter type="unsigned __int32*" varname="index" etype="UI32" memwidth="32"/>
+ <parameter type="unsigned __int64" varname="a" etype="UI64"/>
+ <description>Set "index" to the index of the highest set bit in 64-bit integer "a". If no bits are set in "a", then "index" is undefined and "dst" is set to 0, otherwise "dst" is set to 1.</description>
+ <operation>
+tmp := 63
+IF a == 0
+ // MEM[index+31:index] is undefined
+ dst := 0
+ELSE
+ DO WHILE ((tmp &gt; 0) AND a[tmp] == 0)
+ tmp := tmp - 1
+ OD
+ MEM[index+31:index] := tmp
+ dst := 1
+FI
+ </operation>
+ <instruction name="BSR" form="r64, r64" xed="BSR_GPRv_GPRv"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_bittest">
+ <type>Integer</type>
+ <type>Flag</type>
+ <category>Bit Manipulation</category>
+ <return type="unsigned char" varname="dst" etype="UI8"/>
+ <parameter type="__int32*" varname="a" etype="UI32" memwidth="32"/>
+ <parameter type="__int32" varname="b" etype="IMM" immwidth="5"/>
+ <description>Return the bit at index "b" of 32-bit integer "a".</description>
+ <operation>
+addr := a + ZeroExtend64(b)
+dst[0] := MEM[addr]
+ </operation>
+ <instruction name="BT" form="m32, r32" xed="BT_MEMv_GPRv"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_bittestandcomplement">
+ <type>Integer</type>
+ <type>Flag</type>
+ <category>Bit Manipulation</category>
+ <return type="unsigned char" varname="dst" etype="UI8"/>
+ <parameter type="__int32*" varname="a" etype="UI32" memwidth="32"/>
+ <parameter type="__int32" varname="b" etype="IMM" immwidth="5"/>
+ <description>Return the bit at index "b" of 32-bit integer "a", and set that bit to its complement.</description>
+ <operation>
+addr := a + ZeroExtend64(b)
+dst[0] := MEM[addr]
+MEM[addr] := ~dst[0]
+ </operation>
+ <instruction name="BTC" form="m32, r32" xed="BTC_MEMv_GPRv"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_bittestandreset">
+ <type>Integer</type>
+ <type>Flag</type>
+ <category>Bit Manipulation</category>
+ <return type="unsigned char" varname="dst" etype="UI8"/>
+ <parameter type="__int32*" varname="a" etype="UI32" memwidth="32"/>
+ <parameter type="__int32" varname="b" etype="IMM" immwidth="5"/>
+ <description>Return the bit at index "b" of 32-bit integer "a", and set that bit to zero.</description>
+ <operation>
+addr := a + ZeroExtend64(b)
+dst[0] := MEM[addr]
+MEM[addr] := 0
+ </operation>
+ <instruction name="BTR" form="m32, r32" xed="BTR_MEMv_GPRv"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_bittestandset">
+ <type>Integer</type>
+ <type>Flag</type>
+ <category>Bit Manipulation</category>
+ <return type="unsigned char" varname="dst" etype="UI8"/>
+ <parameter type="__int32*" varname="a" etype="UI32" memwidth="32"/>
+ <parameter type="__int32" varname="b" etype="IMM" immwidth="5"/>
+ <description>Return the bit at index "b" of 32-bit integer "a", and set that bit to one.</description>
+ <operation>
+addr := a + ZeroExtend64(b)
+dst[0] := MEM[addr]
+MEM[addr] := 1
+ </operation>
+ <instruction name="BTS" form="m32, r32" xed="BTS_MEMv_GPRv"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_bittest64">
+ <type>Integer</type>
+ <type>Flag</type>
+ <category>Bit Manipulation</category>
+ <return type="unsigned char" varname="dst" etype="UI8"/>
+ <parameter type="__int64*" varname="a" etype="UI64" memwidth="64"/>
+ <parameter type="__int64" varname="b" etype="IMM" immwidth="6"/>
+ <description>Return the bit at index "b" of 64-bit integer "a".</description>
+ <operation>
+addr := a + b
+dst[0] := MEM[addr]
+ </operation>
+ <instruction name="BT" form="r64, r64" xed="BT_GPRv_GPRv"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_bittestandcomplement64">
+ <type>Integer</type>
+ <type>Flag</type>
+ <category>Bit Manipulation</category>
+ <return type="unsigned char" varname="dst" etype="UI8"/>
+ <parameter type="__int64*" varname="a" etype="UI64" memwidth="64"/>
+ <parameter type="__int64" varname="b" etype="IMM" immwidth="6"/>
+ <description>Return the bit at index "b" of 64-bit integer "a", and set that bit to its complement.</description>
+ <operation>
+addr := a + b
+dst[0] := MEM[addr]
+MEM[addr] := ~dst[0]
+ </operation>
+ <instruction name="BTC" form="r64, r64" xed="BTC_GPRv_GPRv"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_bittestandreset64">
+ <type>Integer</type>
+ <type>Flag</type>
+ <category>Bit Manipulation</category>
+ <return type="unsigned char" varname="dst" etype="UI8"/>
+ <parameter type="__int64*" varname="a" etype="UI64" memwidth="64"/>
+ <parameter type="__int64" varname="b" etype="IMM" immwidth="6"/>
+ <description>Return the bit at index "b" of 64-bit integer "a", and set that bit to zero.</description>
+ <operation>
+addr := a + b
+dst[0] := MEM[addr]
+MEM[addr] := 0
+ </operation>
+ <instruction name="BTR" form="r64, r64" xed="BTR_GPRv_GPRv"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_bittestandset64">
+ <type>Integer</type>
+ <type>Flag</type>
+ <category>Bit Manipulation</category>
+ <return type="unsigned char" varname="dst" etype="UI8"/>
+ <parameter type="__int64*" varname="a" etype="UI64" memwidth="64"/>
+ <parameter type="__int64" varname="b" etype="IMM" immwidth="6"/>
+ <description>Return the bit at index "b" of 64-bit integer "a", and set that bit to one.</description>
+ <operation>
+addr := a + b
+dst[0] := MEM[addr]
+MEM[addr] := 1
+ </operation>
+ <instruction name="BTS" form="r64, r64" xed="BTS_GPRv_GPRv"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_bswap">
+ <type>Integer</type>
+ <category>Bit Manipulation</category>
+ <return type="int" varname="dst" etype="UI32"/>
+ <parameter type="int" varname="a" etype="UI32"/>
+ <description>Reverse the byte order of 32-bit integer "a", and store the result in "dst". This intrinsic is provided for conversion between little and big endian values.</description>
+ <operation>
+dst[7:0] := a[31:24]
+dst[15:8] := a[23:16]
+dst[23:16] := a[15:8]
+dst[31:24] := a[7:0]
+ </operation>
+ <instruction name="BSWAP" form="r32" xed="BSWAP_GPRv"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_bswap64">
+ <type>Integer</type>
+ <category>Bit Manipulation</category>
+ <return type="__int64" varname="dst" etype="UI64"/>
+ <parameter type="__int64" varname="a" etype="UI64"/>
+ <description>Reverse the byte order of 64-bit integer "a", and store the result in "dst". This intrinsic is provided for conversion between little and big endian values.</description>
+ <operation>
+dst[7:0] := a[63:56]
+dst[15:8] := a[55:48]
+dst[23:16] := a[47:40]
+dst[31:24] := a[39:32]
+dst[39:32] := a[31:24]
+dst[47:40] := a[23:16]
+dst[55:48] := a[15:8]
+dst[63:56] := a[7:0]
+ </operation>
+ <instruction name="BSWAP" form="r64" xed="BSWAP_GPRv"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_castf32_u32">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <category>Cast</category>
+ <return type="unsigned __int32" varname="dst" etype="UI32"/>
+ <parameter type="float" varname="a" etype="FP32"/>
+ <description>Cast from type float to type unsigned __int32 without conversion.
+ This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency.</description>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_castf64_u64">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <category>Cast</category>
+ <return type="unsigned __int64" varname="dst" etype="UI64"/>
+ <parameter type="double" varname="a" etype="FP64"/>
+ <description>Cast from type double to type unsigned __int64 without conversion.
+ This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency.</description>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_castu32_f32">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <category>Cast</category>
+ <return type="float" varname="dst" etype="FP32"/>
+ <parameter type="unsigned __int32" varname="a" etype="UI32"/>
+ <description>Cast from type unsigned __int32 to type float without conversion.
+ This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency.</description>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_castu64_f64">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <category>Cast</category>
+ <return type="double" varname="dst" etype="FP64"/>
+ <parameter type="unsigned __int64" varname="a" etype="UI64"/>
+ <description>Cast from type unsigned __int64 to type double without conversion.
+ This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency.</description>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_lrotl">
+ <type>Integer</type>
+ <category>Shift</category>
+ <return type="unsigned long" varname="dst" etype="UI32"/>
+ <parameter type="unsigned long" varname="a" etype="UI32"/>
+ <parameter type="int" varname="shift" etype="IMM" immwidth="8"/>
+ <description>Shift the bits of unsigned long integer "a" left by the number of bits specified in "shift", rotating the most-significant bit to the least-significant bit location, and store the unsigned result in "dst".</description>
+ <operation>// size := 32 or 64
+dst := a
+count := shift AND (size - 1)
+DO WHILE (count &gt; 0)
+ tmp[0] := dst[size - 1]
+ dst := (dst &lt;&lt; 1) OR tmp[0]
+ count := count - 1
+OD
+ </operation>
+ <instruction name="ROL" form="r64, imm8" xed="ROL_GPRv_IMMb"/>
+ <instruction name="ROL" form="r32, imm8" xed="ROL_GPRv_IMMb"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_lrotr">
+ <type>Integer</type>
+ <category>Shift</category>
+ <return type="unsigned long" varname="dst" etype="UI32"/>
+ <parameter type="unsigned long" varname="a" etype="UI32"/>
+ <parameter type="int" varname="shift" etype="IMM" immwidth="8"/>
+ <description>Shift the bits of unsigned long integer "a" right by the number of bits specified in "shift", rotating the least-significant bit to the most-significant bit location, and store the unsigned result in "dst".</description>
+ <operation>// size := 32 or 64
+dst := a
+count := shift AND (size - 1)
+DO WHILE (count &gt; 0)
+ tmp[size - 1] := dst[0]
+ dst := (dst &gt;&gt; 1) OR tmp[size - 1]
+ count := count - 1
+OD
+ </operation>
+ <instruction name="ROR" form="r64, imm8" xed="ROR_GPRv_IMMb"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" sequence="TRUE" name="_allow_cpu_features">
+ <category>General Support</category>
+ <return type="void"/>
+ <parameter type="unsigned __int64" varname="a" etype="IMM" immwidth="8"/>
+ <description>Treat the processor-specific feature(s) specified in "a" as available. Multiple features may be OR'd together. See the valid feature flags below:</description>
+ <operation>
+_FEATURE_GENERIC_IA32
+_FEATURE_FPU
+_FEATURE_CMOV
+_FEATURE_MMX
+_FEATURE_FXSAVE
+_FEATURE_SSE
+_FEATURE_SSE2
+_FEATURE_SSE3
+_FEATURE_SSSE3
+_FEATURE_SSE4_1
+_FEATURE_SSE4_2
+_FEATURE_MOVBE
+_FEATURE_POPCNT
+_FEATURE_PCLMULQDQ
+_FEATURE_AES
+_FEATURE_F16C
+_FEATURE_AVX
+_FEATURE_RDRND
+_FEATURE_FMA
+_FEATURE_BMI
+_FEATURE_LZCNT
+_FEATURE_HLE
+_FEATURE_RTM
+_FEATURE_AVX2
+_FEATURE_KNCNI
+_FEATURE_AVX512F
+_FEATURE_ADX
+_FEATURE_RDSEED
+_FEATURE_AVX512ER
+_FEATURE_AVX512PF
+_FEATURE_AVX512CD
+_FEATURE_SHA
+_FEATURE_MPX
+_FEATURE_AVX512BW
+_FEATURE_AVX512VL
+_FEATURE_AVX512VBMI
+_FEATURE_AVX512_4FMAPS
+_FEATURE_AVX512_4VNNIW
+_FEATURE_AVX512_VPOPCNTDQ
+_FEATURE_AVX512_BITALG
+_FEATURE_AVX512_VBMI2
+_FEATURE_GFNI
+_FEATURE_VAES
+_FEATURE_VPCLMULQDQ
+_FEATURE_AVX512_VNNI
+_FEATURE_CLWB
+_FEATURE_RDPID
+_FEATURE_IBT
+_FEATURE_SHSTK
+_FEATURE_SGX
+_FEATURE_WBNOINVD
+_FEATURE_PCONFIG
+_FEATURE_AXV512_4VNNIB
+_FEATURE_AXV512_4FMAPH
+_FEATURE_AXV512_BITALG2
+_FEATURE_AXV512_VP2INTERSECT
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" sequence="TRUE" name="_may_i_use_cpu_feature">
+ <category>General Support</category>
+ <return type="int" varname="k" etype="UI32"/>
+ <parameter type="unsigned __int64" varname="a" etype="IMM" immwidth="8"/>
+ <description>Dynamically query the processor to determine if the processor-specific feature(s) specified in "a" are available, and return true or false (1 or 0) if the set of features is available. Multiple features may be OR'd together. This intrinsic does not check the processor vendor. See the valid feature flags below:</description>
+ <operation>
+_FEATURE_GENERIC_IA32
+_FEATURE_FPU
+_FEATURE_CMOV
+_FEATURE_MMX
+_FEATURE_FXSAVE
+_FEATURE_SSE
+_FEATURE_SSE2
+_FEATURE_SSE3
+_FEATURE_SSSE3
+_FEATURE_SSE4_1
+_FEATURE_SSE4_2
+_FEATURE_MOVBE
+_FEATURE_POPCNT
+_FEATURE_PCLMULQDQ
+_FEATURE_AES
+_FEATURE_F16C
+_FEATURE_AVX
+_FEATURE_RDRND
+_FEATURE_FMA
+_FEATURE_BMI
+_FEATURE_LZCNT
+_FEATURE_HLE
+_FEATURE_RTM
+_FEATURE_AVX2
+_FEATURE_KNCNI
+_FEATURE_AVX512F
+_FEATURE_ADX
+_FEATURE_RDSEED
+_FEATURE_AVX512ER
+_FEATURE_AVX512PF
+_FEATURE_AVX512CD
+_FEATURE_SHA
+_FEATURE_MPX
+_FEATURE_AVX512BW
+_FEATURE_AVX512VL
+_FEATURE_AVX512VBMI
+_FEATURE_AVX512_4FMAPS
+_FEATURE_AVX512_4VNNIW
+_FEATURE_AVX512_VPOPCNTDQ
+_FEATURE_AVX512_BITALG
+_FEATURE_AVX512_VBMI2
+_FEATURE_GFNI
+_FEATURE_VAES
+_FEATURE_VPCLMULQDQ
+_FEATURE_AVX512_VNNI
+_FEATURE_CLWB
+_FEATURE_RDPID
+_FEATURE_IBT
+_FEATURE_SHSTK
+_FEATURE_SGX
+_FEATURE_WBNOINVD
+_FEATURE_PCONFIG
+_FEATURE_AXV512_4VNNIB
+_FEATURE_AXV512_4FMAPH
+_FEATURE_AXV512_BITALG2
+_FEATURE_AXV512_VP2INTERSECT
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_rdpmc">
+ <category>General Support</category>
+ <return type="__int64" varname="dst" etype="UI64"/>
+ <parameter type="int" varname="a" etype="UI32"/>
+ <description>Read the Performance Monitor Counter (PMC) specified by "a", and store up to 64-bits in "dst". The width of performance counters is implementation specific.</description>
+ <operation>dst[63:0] := ReadPMC(a)
+ </operation>
+ <instruction name="RDPMC" xed="RDPMC"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_rotl">
+ <type>Integer</type>
+ <category>Shift</category>
+ <return type="unsigned int" varname="dst" etype="UI32"/>
+ <parameter type="unsigned int" varname="a" etype="UI32"/>
+ <parameter type="int" varname="shift" etype="IMM" immwidth="5"/>
+ <description>Shift the bits of unsigned 32-bit integer "a" left by the number of bits specified in "shift", rotating the most-significant bit to the least-significant bit location, and store the unsigned result in "dst".</description>
+ <operation>
+dst := a
+count := shift AND 31
+DO WHILE (count &gt; 0)
+ tmp[0] := dst[31]
+ dst := (dst &lt;&lt; 1) OR tmp[0]
+ count := count - 1
+OD
+ </operation>
+ <instruction name="ROL" form="r32, imm8" xed="ROL_GPRv_IMMb"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_rotr">
+ <type>Integer</type>
+ <category>Shift</category>
+ <return type="unsigned int" varname="dst" etype="UI32"/>
+ <parameter type="unsigned int" varname="a" etype="UI32"/>
+ <parameter type="int" varname="shift" etype="IMM" immwidth="5"/>
+ <description>Shift the bits of unsigned 32-bit integer "a" right by the number of bits specified in "shift", rotating the least-significant bit to the most-significant bit location, and store the unsigned result in "dst".</description>
+ <operation>
+dst := a
+count := shift AND 31
+DO WHILE (count &gt; 0)
+ tmp[31] := dst[0]
+ dst := (dst &gt;&gt; 1) OR tmp
+ count := count - 1
+OD
+ </operation>
+ <instruction name="ROR" form="r32, imm8" xed="ROR_GPRv_IMMb"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_rotwl">
+ <type>Integer</type>
+ <category>Shift</category>
+ <return type="unsigned short" varname="dst" etype="UI16"/>
+ <parameter type="unsigned short" varname="a" etype="UI16"/>
+ <parameter type="int" varname="shift" etype="IMM" immwidth="4"/>
+ <description>Shift the bits of unsigned 16-bit integer "a" left by the number of bits specified in "shift", rotating the most-significant bit to the least-significant bit location, and store the unsigned result in "dst".</description>
+ <operation>
+dst := a
+count := shift AND 15
+DO WHILE (count &gt; 0)
+ tmp[0] := dst[15]
+ dst := (dst &lt;&lt; 1) OR tmp[0]
+ count := count - 1
+OD
+ </operation>
+ <instruction name="ROL" form="r16, imm8" xed="ROL_GPRv_IMMb"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_rotwr">
+ <type>Integer</type>
+ <category>Shift</category>
+ <return type="unsigned short" varname="dst" etype="UI16"/>
+ <parameter type="unsigned short" varname="a" etype="UI16"/>
+ <parameter type="int" varname="shift" etype="IMM" immwidth="4"/>
+ <description>Shift the bits of unsigned 16-bit integer "a" right by the number of bits specified in "shift", rotating the least-significant bit to the most-significant bit location, and store the unsigned result in "dst".</description>
+ <operation>
+dst := a
+count := shift AND 15
+DO WHILE (count &gt; 0)
+ tmp[15] := dst[0]
+ dst := (dst &gt;&gt; 1) OR tmp
+ count := count - 1
+OD
+ </operation>
+ <instruction name="ROR" form="r16, imm8" xed="ROR_GPRv_IMMb"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_rotl64">
+ <type>Integer</type>
+ <category>Shift</category>
+ <return type="unsigned __int64" varname="dst" etype="UI64"/>
+ <parameter type="unsigned __int64" varname="a" etype="UI64"/>
+ <parameter type="int" varname="shift" etype="IMM" immwidth="6"/>
+ <description>Shift the bits of unsigned 64-bit integer "a" left by the number of bits specified in "shift", rotating the most-significant bit to the least-significant bit location, and store the unsigned result in "dst".</description>
+ <operation>
+dst := a
+count := shift AND 63
+DO WHILE (count &gt; 0)
+ tmp[0] := dst[63]
+ dst := (dst &lt;&lt; 1) OR tmp[0]
+ count := count - 1
+OD
+ </operation>
+ <instruction name="ROL" form="r64, imm8" xed="ROL_GPRv_IMMb"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_rotr64">
+ <type>Integer</type>
+ <category>Shift</category>
+ <return type="unsigned __int64" varname="dst" etype="UI64"/>
+ <parameter type="unsigned __int64" varname="a" etype="UI64"/>
+ <parameter type="int" varname="shift" etype="IMM" immwidth="6"/>
+ <description>Shift the bits of unsigned 64-bit integer "a" right by the number of bits specified in "shift", rotating the least-significant bit to the most-significant bit location, and store the unsigned result in "dst".</description>
+ <operation>
+dst := a
+count := shift AND 63
+DO WHILE (count &gt; 0)
+ tmp[63] := dst[0]
+ dst := (dst &gt;&gt; 1) OR tmp[63]
+ count := count - 1
+OD
+ </operation>
+ <instruction name="ROR" form="r64, imm8" xed="ROR_GPRv_IMMb"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_addcarry_u32">
+ <type>Integer</type>
+ <type>Flag</type>
+ <category>Arithmetic</category>
+ <return type="unsigned char" varname="dst" etype="UI8"/>
+ <parameter type="unsigned char" varname="c_in" etype="UI8"/>
+ <parameter type="unsigned int" varname="a" etype="UI32"/>
+ <parameter type="unsigned int" varname="b" etype="UI32"/>
+ <parameter type="unsigned int *" varname="out" etype="UI32" memwidth="32"/>
+ <description>Add unsigned 32-bit integers "a" and "b" with unsigned 8-bit carry-in "c_in" (carry flag), and store the unsigned 32-bit result in "out", and the carry-out in "dst" (carry or overflow flag).</description>
+ <operation>
+tmp[32:0] := a[31:0] + b[31:0] + (c_in &gt; 0 ? 1 : 0)
+MEM[out+31:out] := tmp[31:0]
+dst[0] := tmp[32]
+dst[7:1] := 0
+ </operation>
+ <instruction name="ADC" form="r32, r32" xed="ADC_GPRv_GPRv_11"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_addcarry_u64">
+ <type>Integer</type>
+ <type>Flag</type>
+ <category>Arithmetic</category>
+ <return type="unsigned char" varname="dst" etype="UI8"/>
+ <parameter type="unsigned char" varname="c_in" etype="UI8"/>
+ <parameter type="unsigned __int64" varname="a" etype="UI64"/>
+ <parameter type="unsigned __int64" varname="b" etype="UI64"/>
+ <parameter type="unsigned __int64 *" varname="out" etype="UI64" memwidth="64"/>
+ <description>Add unsigned 64-bit integers "a" and "b" with unsigned 8-bit carry-in "c_in" (carry flag), and store the unsigned 64-bit result in "out", and the carry-out in "dst" (carry or overflow flag).</description>
+ <operation>
+tmp[64:0] := a[63:0] + b[63:0] + (c_in &gt; 0 ? 1 : 0)
+MEM[out+63:out] := tmp[63:0]
+dst[0] := tmp[64]
+dst[7:1] := 0
+ </operation>
+ <instruction name="ADC" form="r64, r64" xed="ADC_GPRv_GPRv_11"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_subborrow_u32">
+ <type>Integer</type>
+ <type>Flag</type>
+ <category>Arithmetic</category>
+ <return type="unsigned char" varname="dst" etype="UI8"/>
+ <parameter type="unsigned char" varname="c_in" etype="UI8"/>
+ <parameter type="unsigned int" varname="a" etype="UI32"/>
+ <parameter type="unsigned int" varname="b" etype="UI32"/>
+ <parameter type="unsigned int *" varname="out" etype="UI32" memwidth="32"/>
+ <description>Add unsigned 8-bit borrow "c_in" (carry flag) to unsigned 32-bit integer "b", and subtract the result from unsigned 32-bit integer "a". Store the unsigned 32-bit result in "out", and the carry-out in "dst" (carry or overflow flag).</description>
+ <operation>
+tmp[32:0] := a[31:0] - (b[31:0] + (c_in &gt; 0 ? 1 : 0))
+MEM[out+31:out] := tmp[31:0]
+dst[0] := tmp[32]
+dst[7:1] := 0
+ </operation>
+ <instruction name="SBB" form="r32, r32" xed="SBB_GPRv_GPRv_19"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_subborrow_u64">
+ <type>Integer</type>
+ <type>Flag</type>
+ <category>Arithmetic</category>
+ <return type="unsigned char" varname="dst" etype="UI8"/>
+ <parameter type="unsigned char" varname="c_in" etype="UI8"/>
+ <parameter type="unsigned __int64" varname="a" etype="UI64"/>
+ <parameter type="unsigned __int64" varname="b" etype="UI64"/>
+ <parameter type="unsigned __int64 *" varname="out" etype="UI64" memwidth="64"/>
+ <description>Add unsigned 8-bit borrow "c_in" (carry flag) to unsigned 64-bit integer "b", and subtract the result from unsigned 64-bit integer "a". Store the unsigned 64-bit result in "out", and the carry-out in "dst" (carry or overflow flag).</description>
+ <operation>
+tmp[64:0] := a[63:0] - (b[63:0] + (c_in &gt; 0 ? 1 : 0))
+MEM[out+63:out] := tmp[63:0]
+dst[0] := tmp[64]
+dst[7:1] := 0
+ </operation>
+ <instruction name="SBB" form="r64, r64" xed="SBB_GPRv_GPRv_19"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_ptwrite32">
+ <category>Miscellaneous</category>
+ <return type="void"/>
+ <parameter type="unsigned int" varname="a" etype="UI32"/>
+ <description>Insert the 32-bit data from "a" into a Processor Trace stream via a PTW packet. The PTW packet will be inserted if tracing is currently enabled and ptwrite is currently enabled. The current IP will also be inserted via a FUP packet if FUPonPTW is enabled.</description>
+ <instruction name="PTWRITE" form="r32" xed="PTWRITE_GPRy"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_ptwrite64">
+ <category>Miscellaneous</category>
+ <return type="void"/>
+ <parameter type="unsigned __int64" varname="a" etype="UI64"/>
+ <description>Insert the 64-bit data from "a" into a Processor Trace stream via a PTW packet. The PTW packet will be inserted if tracing is currently enabled and ptwrite is currently enabled. The current IP will also be inserted via a FUP packet if FUPonPTW is enabled.</description>
+ <instruction name="PTWRITE" form="r64" xed="PTWRITE_GPRy"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_enclu_u32">
+ <category>Miscellaneous</category>
+ <return type="unsigned int" varname="dst" etype="UI32"/>
+ <parameter type="const int" varname="a" etype="UI32"/>
+ <parameter type="size_t*" varname="__data" etype="UI64"/>
+ <description>Invoke the Intel SGX enclave user (non-privileged) leaf function specified by "a", and return the error code. The "__data" array contains 3 32-bit elements that may act as input, output, or be unused, depending on the semantics of the specified leaf function; these correspond to ebx, ecx, and edx.</description>
+ <instruction name="ENCLU" xed="ENCLU"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_encls_u32">
+ <category>Miscellaneous</category>
+ <return type="unsigned int" varname="dst" etype="UI32"/>
+ <parameter type="const int" varname="a" etype="UI32"/>
+ <parameter type="size_t*" varname="__data" etype="UI64"/>
+ <description>Invoke the Intel SGX enclave system (privileged) leaf function specified by "a", and return the error code. The "__data" array contains 3 32-bit elements that may act as input, output, or be unused, depending on the semantics of the specified leaf function; these correspond to ebx, ecx, and edx.</description>
+ <instruction name="ENCLS" xed="ENCLS"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_enclv_u32">
+ <category>Miscellaneous</category>
+ <return type="unsigned int" varname="dst" etype="UI32"/>
+ <parameter type="const int" varname="a" etype="UI32"/>
+ <parameter type="size_t*" varname="__data" etype="UI64"/>
+ <description>Invoke the Intel SGX enclave virtualized (VMM) leaf function specified by "a", and return the error code. The "__data" array contains 3 32-bit elements that may act as input, output, or be unused, depending on the semantics of the specified leaf function; these correspond to ebx, ecx, and edx.</description>
+ <instruction name="ENCLV" xed="ENCLV"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_wbinvd">
+ <category>Miscellaneous</category>
+ <return type="void"/>
+ <parameter type="void"/>
+ <description>Write back and flush internal caches.
+ Initiate writing-back and flushing of external
+ caches.</description>
+ <instruction name="WBINVD" xed="WBINVD"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" sequence="TRUE" name="_cvtsh_ss">
+ <type>Floating Point</type>
+ <category>Convert</category>
+ <return type="float" varname="dst" etype="FP32"/>
+ <parameter type="unsigned short" varname="a" etype="UI16"/>
+ <description>Convert the half-precision (16-bit) floating-point value "a" to a single-precision (32-bit) floating-point value, and store the result in "dst".</description>
+ <operation>
+dst[31:0] := Convert_FP16_To_FP32(a[15:0])
+ </operation>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" sequence="TRUE" name="_cvtss_sh">
+ <type>Floating Point</type>
+ <category>Convert</category>
+ <return type="unsigned short" varname="dst" etype="UI16"/>
+ <parameter type="float" varname="a" etype="FP32"/>
+ <parameter type="int" varname="rounding" etype="IMM" hint="TRUE" immtype="_MM_FROUND"/>
+ <description>Convert the single-precision (32-bit) floating-point value "a" to a half-precision (16-bit) floating-point value, and store the result in "dst".
+ [round_note]</description>
+ <operation>
+dst[15:0] := Convert_FP32_To_FP16(a[31:0])
+ </operation>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" vexEq="TRUE" name="_mm_clmulepi64_si128">
+ <type>Integer</type>
+ <CPUID>PCLMULQDQ</CPUID>
+ <category>Application-Targeted</category>
+ <return type="__m128i" varname="dst" etype="M128"/>
+ <parameter type="__m128i" varname="a" etype="M128"/>
+ <parameter type="__m128i" varname="b" etype="M128"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="1"/>
+ <description>Perform a carry-less multiplication of two 64-bit integers, selected from "a" and "b" according to "imm8", and store the results in "dst".</description>
+ <operation>
+IF (imm8[0] == 0)
+ TEMP1 := a[63:0]
+ELSE
+ TEMP1 := a[127:64]
+FI
+IF (imm8[4] == 0)
+ TEMP2 := b[63:0]
+ELSE
+ TEMP2 := b[127:64]
+FI
+FOR i := 0 to 63
+ TEMP[i] := (TEMP1[0] and TEMP2[i])
+ FOR j := 1 to i
+ TEMP[i] := TEMP[i] XOR (TEMP1[j] AND TEMP2[i-j])
+ ENDFOR
+ dst[i] := TEMP[i]
+ENDFOR
+FOR i := 64 to 127
+ TEMP[i] := 0
+ FOR j := (i - 63) to 63
+ TEMP[i] := TEMP[i] XOR (TEMP1[j] AND TEMP2[i-j])
+ ENDFOR
+ dst[i] := TEMP[i]
+ENDFOR
+dst[127] := 0
+ </operation>
+ <instruction name="PCLMULQDQ" form="xmm, xmm, imm8" xed="PCLMULQDQ_XMMdq_XMMdq_IMMb"/>
+ <header>wmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_pconfig_u32">
+ <CPUID>PCONFIG</CPUID>
+ <category>Miscellaneous</category>
+ <return type="unsigned int" varname="dst" etype="UI32"/>
+ <parameter type="const int" varname="a" etype="UI32"/>
+ <parameter type="size_t*" varname="__data" etype="UI64"/>
+ <description>Invoke the PCONFIG leaf function specified by "a". The "__data" array contains 3 32-bit elements that may act as input, output, or be unused, depending on the semantics of the specified leaf function; these correspond to rbx, rcx, and rdx. May return the value in eax, depending on the semantics of the specified leaf function.</description>
+ <instruction name="PCONFIG" xed="PCONFIG"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_mm_popcnt_u32">
+ <type>Integer</type>
+ <type>Flag</type>
+ <CPUID>POPCNT</CPUID>
+ <category>Bit Manipulation</category>
+ <return type="int" varname="dst" etype="UI32"/>
+ <parameter type="unsigned int" varname="a" etype="UI32"/>
+ <description>Count the number of bits set to 1 in unsigned 32-bit integer "a", and return that count in "dst".</description>
+ <operation>
+dst := 0
+FOR i := 0 to 31
+ IF a[i]
+ dst := dst + 1
+ FI
+ENDFOR
+ </operation>
+ <instruction name="POPCNT" form="r32, r32" xed="POPCNT_GPRv_GPRv"/>
+ <header>nmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_mm_popcnt_u64">
+ <type>Integer</type>
+ <type>Flag</type>
+ <CPUID>POPCNT</CPUID>
+ <category>Bit Manipulation</category>
+ <return type="__int64" varname="dst" etype="UI64"/>
+ <parameter type="unsigned __int64" varname="a" etype="UI64"/>
+ <description>Count the number of bits set to 1 in unsigned 64-bit integer "a", and return that count in "dst".</description>
+ <operation>
+dst := 0
+FOR i := 0 to 63
+ IF a[i]
+ dst := dst + 1
+ FI
+ENDFOR
+ </operation>
+ <instruction name="POPCNT" form="r64, r64" xed="POPCNT_GPRv_GPRv"/>
+ <header>nmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_popcnt32">
+ <type>Integer</type>
+ <type>Flag</type>
+ <CPUID>POPCNT</CPUID>
+ <category>Bit Manipulation</category>
+ <return type="int" varname="dst" etype="UI32"/>
+ <parameter type="int" varname="a" etype="UI32"/>
+ <description>Count the number of bits set to 1 in 32-bit integer "a", and return that count in "dst".</description>
+ <operation>
+dst := 0
+FOR i := 0 to 31
+ IF a[i]
+ dst := dst + 1
+ FI
+ENDFOR
+ </operation>
+ <instruction name="POPCNT" form="r32, r32" xed="POPCNT_GPRv_GPRv"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_popcnt64">
+ <type>Integer</type>
+ <type>Flag</type>
+ <CPUID>POPCNT</CPUID>
+ <category>Bit Manipulation</category>
+ <return type="int" varname="dst" etype="UI32"/>
+ <parameter type="__int64" varname="a" etype="UI64"/>
+ <description>Count the number of bits set to 1 in 64-bit integer "a", and return that count in "dst".</description>
+ <operation>
+dst := 0
+FOR i := 0 to 63
+ IF a[i]
+ dst := dst + 1
+ FI
+ENDFOR
+ </operation>
+ <instruction name="POPCNT" form="r64, r64" xed="POPCNT_GPRv_GPRv"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_mm_prefetch">
+ <CPUID>PREFETCHWT1</CPUID>
+ <category>General Support</category>
+ <return type="void"/>
+ <parameter type="char const*" varname="p" etype="UI8"/>
+ <parameter type="int" varname="i" etype="IMM" immwidth="2"/>
+ <description>Fetch the line of data from memory that contains address "p" to a location in the cache hierarchy specified by the locality hint "i".</description>
+ <instruction name="PREFETCHWT1" form="m8" xed="PREFETCHWT1_MEMu8"/>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_rdpid_u32">
+ <CPUID>RDPID</CPUID>
+ <category>General Support</category>
+ <return type="unsigned int" varname="dst" etype="UI32"/>
+ <parameter type="void"/>
+ <description>Copy the IA32_TSC_AUX MSR (signature value) into "dst".</description>
+ <operation>dst[31:0] := IA32_TSC_AUX[31:0]
+ </operation>
+ <instruction name="RDPID" form="r32" xed="RDPID_GPR32u32"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_rdrand16_step">
+ <type>Integer</type>
+ <type>Flag</type>
+ <CPUID>RDRAND</CPUID>
+ <category>Random</category>
+ <return type="int" varname="dst" etype="UI32"/>
+ <parameter type="unsigned short*" varname="val" etype="UI16" memwidth="16"/>
+ <description>Read a hardware generated 16-bit random value and store the result in "val". Return 1 if a random value was generated, and 0 otherwise.</description>
+ <operation>IF HW_RND_GEN.ready == 1
+ val[15:0] := HW_RND_GEN.data
+ dst := 1
+ELSE
+ val[15:0] := 0
+ dst := 0
+FI
+ </operation>
+ <instruction name="RDRAND" form="r16" xed="RDRAND_GPRv"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_rdrand32_step">
+ <type>Integer</type>
+ <type>Flag</type>
+ <CPUID>RDRAND</CPUID>
+ <category>Random</category>
+ <return type="int" varname="dst" etype="UI32"/>
+ <parameter type="unsigned int*" varname="val" etype="UI32" memwidth="32"/>
+ <description>Read a hardware generated 32-bit random value and store the result in "val". Return 1 if a random value was generated, and 0 otherwise.</description>
+ <operation>IF HW_RND_GEN.ready == 1
+ val[31:0] := HW_RND_GEN.data
+ dst := 1
+ELSE
+ val[31:0] := 0
+ dst := 0
+FI
+ </operation>
+ <instruction name="RDRAND" form="r32" xed="RDRAND_GPRv"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_rdrand64_step">
+ <type>Integer</type>
+ <type>Flag</type>
+ <CPUID>RDRAND</CPUID>
+ <category>Random</category>
+ <return type="int" varname="dst" etype="UI32"/>
+ <parameter type="unsigned __int64*" varname="val" etype="UI64" memwidth="64"/>
+ <description>Read a hardware generated 64-bit random value and store the result in "val". Return 1 if a random value was generated, and 0 otherwise.</description>
+ <operation>IF HW_RND_GEN.ready == 1
+ val[63:0] := HW_RND_GEN.data
+ dst := 1
+ELSE
+ val[63:0] := 0
+ dst := 0
+FI
+ </operation>
+ <instruction name="RDRAND" form="r64" xed="RDRAND_GPRv"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_rdseed16_step">
+ <type>Flag</type>
+ <CPUID>RDSEED</CPUID>
+ <category>Random</category>
+ <return type="int" varname="dst" etype="UI32"/>
+ <parameter type="unsigned short *" varname="val" etype="UI16"/>
+ <description>Read a 16-bit NIST SP800-90B and SP800-90C compliant random value and store in "val". Return 1 if a random value was generated, and 0 otherwise.</description>
+ <operation>IF HW_NRND_GEN.ready == 1
+ val[15:0] := HW_NRND_GEN.data
+ dst := 1
+ELSE
+ val[15:0] := 0
+ dst := 0
+FI
+ </operation>
+ <instruction name="RDSEED" form="r16" xed="RDSEED_GPRv"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_rdseed32_step">
+ <type>Flag</type>
+ <CPUID>RDSEED</CPUID>
+ <category>Random</category>
+ <return type="int" varname="dst" etype="UI32"/>
+ <parameter type="unsigned int *" varname="val" etype="UI32"/>
+ <description>Read a 32-bit NIST SP800-90B and SP800-90C compliant random value and store in "val". Return 1 if a random value was generated, and 0 otherwise.</description>
+ <operation>IF HW_NRND_GEN.ready == 1
+ val[31:0] := HW_NRND_GEN.data
+ dst := 1
+ELSE
+ val[31:0] := 0
+ dst := 0
+FI
+ </operation>
+ <instruction name="RDSEED" form="r32" xed="RDSEED_GPRv"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_rdseed64_step">
+ <type>Flag</type>
+ <CPUID>RDSEED</CPUID>
+ <category>Random</category>
+ <return type="int" varname="dst" etype="UI32"/>
+ <parameter type="unsigned __int64 *" varname="val" etype="UI64"/>
+ <description>Read a 64-bit NIST SP800-90B and SP800-90C compliant random value and store in "val". Return 1 if a random value was generated, and 0 otherwise.</description>
+ <operation>IF HW_NRND_GEN.ready == 1
+ val[63:0] := HW_NRND_GEN.data
+ dst := 1
+ELSE
+ val[63:0] := 0
+ dst := 0
+FI
+ </operation>
+ <instruction name="RDSEED" form="r64" xed="RDSEED_GPRv"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="__rdtscp">
+ <CPUID>RDTSCP</CPUID>
+ <category>General Support</category>
+ <return type="unsigned __int64" varname="dst" etype="UI64"/>
+ <parameter type="unsigned int *" varname="mem_addr" etype="UI32" memwidth="32"/>
+ <description>Copy the current 64-bit value of the processor's time-stamp counter into "dst", and store the IA32_TSC_AUX MSR (signature value) into memory at "mem_addr".</description>
+ <operation>dst[63:0] := TimeStampCounter
+MEM[mem_addr+31:mem_addr] := IA32_TSC_AUX[31:0]
+ </operation>
+ <instruction name="RDTSCP" xed="RDTSCP"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_xabort">
+ <CPUID>RTM</CPUID>
+ <category>General Support</category>
+ <return type="void"/>
+ <parameter type="const unsigned int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Force an RTM abort. The EAX register is updated to reflect an XABORT instruction caused the abort, and the "imm8" parameter will be provided in bits [31:24] of EAX.
+ Following an RTM abort, the logical processor resumes execution at the fallback address computed through the outermost XBEGIN instruction.</description>
+ <operation>IF RTM_ACTIVE == 0
+ // nop
+ELSE
+ // restore architectural register state
+ // discard memory updates performed in transaction
+ // update EAX with status and imm8 value
+ eax[31:24] := imm8[7:0]
+ RTM_NEST_COUNT := 0
+ RTM_ACTIVE := 0
+ IF _64_BIT_MODE
+ RIP := fallbackRIP
+ ELSE
+ EIP := fallbackEIP
+ FI
+FI
+ </operation>
+ <instruction name="XABORT" form="imm8" xed="XABORT_IMMb"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_xbegin">
+ <CPUID>RTM</CPUID>
+ <category>General Support</category>
+ <return type="unsigned int" varname="k" etype="UI32"/>
+ <parameter type="void"/>
+ <description>Specify the start of an RTM code region.
+ If the logical processor was not already in transactional execution, then this call causes the logical processor to transition into transactional execution.
+ On an RTM abort, the logical processor discards all architectural register and memory updates performed during the RTM execution, restores architectural state, and starts execution beginning at the fallback address computed from the outermost XBEGIN instruction. Return status of ~0 (0xFFFFFFFF) if continuing inside transaction; all other codes are aborts.</description>
+ <operation>IF RTM_NEST_COUNT &lt; MAX_RTM_NEST_COUNT
+ RTM_NEST_COUNT := RTM_NEST_COUNT + 1
+ IF RTM_NEST_COUNT == 1
+ IF _64_BIT_MODE
+ fallbackRIP := RIP
+ ELSE IF _32_BIT_MODE
+ fallbackEIP := EIP
+ FI
+
+ RTM_ACTIVE := 1
+ // enter RTM execution, record register state, start tracking memory state
+ FI
+ELSE
+ // RTM abort (see _xabort)
+FI
+ </operation>
+ <instruction name="XBEGIN" form="r32" xed="XBEGIN_RELBRz"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_xend">
+ <CPUID>RTM</CPUID>
+ <category>General Support</category>
+ <return type="void"/>
+ <parameter type="void"/>
+ <description>Specify the end of an RTM code region.
+ If this corresponds to the outermost scope, the logical processor will attempt to commit the logical processor state atomically.
+ If the commit fails, the logical processor will perform an RTM abort.</description>
+ <operation>IF RTM_ACTIVE == 1
+ RTM_NEST_COUNT := RTM_NEST_COUNT - 1
+ IF RTM_NEST_COUNT == 0
+ // try to commit transaction
+ IF FAIL_TO_COMMIT_TRANSACTION
+ // RTM abort (see _xabort)
+ ELSE
+ RTM_ACTIVE := 0
+ FI
+ FI
+FI
+ </operation>
+ <instruction name="XEND" xed="XEND"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_xtest">
+ <CPUID>RTM</CPUID>
+ <category>General Support</category>
+ <return type="unsigned char" varname="dst" etype="UI8"/>
+ <parameter type="void"/>
+ <description>Query the transactional execution status, return 1 if inside a transactionally executing RTM or HLE region, and return 0 otherwise.</description>
+ <operation>IF (RTM_ACTIVE == 1 OR HLE_ACTIVE == 1)
+ dst := 1
+ELSE
+ dst := 0
+FI
+ </operation>
+ <instruction name="XTEST" xed="XTEST"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_serialize">
+ <CPUID>SERIALIZE</CPUID>
+ <category>General Support</category>
+ <return type="void"/>
+ <description>Serialize instruction execution, ensuring all modifications to flags, registers, and memory by previous instructions are completed before the next instruction is fetched.</description>
+ <instruction name="SERIALIZE" xed="SERIALIZE"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_mm_sha1msg1_epu32">
+ <type>Integer</type>
+ <CPUID>SHA</CPUID>
+ <category>Cryptography</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="b" etype="UI32"/>
+ <description>Perform an intermediate calculation for the next four SHA1 message values (unsigned 32-bit integers) using previous message values from "a" and "b", and store the result in "dst".</description>
+ <operation>
+W0 := a[127:96]
+W1 := a[95:64]
+W2 := a[63:32]
+W3 := a[31:0]
+W4 := b[127:96]
+W5 := b[95:64]
+dst[127:96] := W2 XOR W0
+dst[95:64] := W3 XOR W1
+dst[63:32] := W4 XOR W2
+dst[31:0] := W5 XOR W3
+ </operation>
+ <instruction name="SHA1MSG1" form="xmm, xmm" xed="SHA1MSG1_XMMi32_XMMi32_SHA"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_mm_sha1msg2_epu32">
+ <type>Integer</type>
+ <CPUID>SHA</CPUID>
+ <category>Cryptography</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="b" etype="UI32"/>
+ <description>Perform the final calculation for the next four SHA1 message values (unsigned 32-bit integers) using the intermediate result in "a" and the previous message values in "b", and store the result in "dst".</description>
+ <operation>
+W13 := b[95:64]
+W14 := b[63:32]
+W15 := b[31:0]
+W16 := (a[127:96] XOR W13) &lt;&lt;&lt; 1
+W17 := (a[95:64] XOR W14) &lt;&lt;&lt; 1
+W18 := (a[63:32] XOR W15) &lt;&lt;&lt; 1
+W19 := (a[31:0] XOR W16) &lt;&lt;&lt; 1
+dst[127:96] := W16
+dst[95:64] := W17
+dst[63:32] := W18
+dst[31:0] := W19
+ </operation>
+ <instruction name="SHA1MSG2" form="xmm, xmm" xed="SHA1MSG2_XMMi32_XMMi32_SHA"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_mm_sha1nexte_epu32">
+ <type>Integer</type>
+ <CPUID>SHA</CPUID>
+ <category>Cryptography</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="b" etype="UI32"/>
+ <description>Calculate SHA1 state variable E after four rounds of operation from the current SHA1 state variable "a", add that value to the scheduled values (unsigned 32-bit integers) in "b", and store the result in "dst".</description>
+ <operation>
+tmp := (a[127:96] &lt;&lt;&lt; 30)
+dst[127:96] := b[127:96] + tmp
+dst[95:64] := b[95:64]
+dst[63:32] := b[63:32]
+dst[31:0] := b[31:0]
+ </operation>
+ <instruction name="SHA1NEXTE" form="xmm, xmm" xed="SHA1NEXTE_XMMi32_XMMi32_SHA"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_mm_sha1rnds4_epu32">
+ <type>Integer</type>
+ <CPUID>SHA</CPUID>
+ <category>Cryptography</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="b" etype="UI32"/>
+ <parameter type="const int" varname="func" etype="IMM" immwidth="2"/>
+ <description>Perform four rounds of SHA1 operation using an initial SHA1 state (A,B,C,D) from "a" and some pre-computed sum of the next 4 round message values (unsigned 32-bit integers), and state variable E from "b", and store the updated SHA1 state (A,B,C,D) in "dst". "func" contains the logic functions and round constants.</description>
+ <operation>IF (func[1:0] == 0)
+ f := f0()
+ K := K0
+ELSE IF (func[1:0] == 1)
+ f := f1()
+ K := K1
+ELSE IF (func[1:0] == 2)
+ f := f2()
+ K := K2
+ELSE IF (func[1:0] == 3)
+ f := f3()
+ K := K3
+FI
+A := a[127:96]
+B := a[95:64]
+C := a[63:32]
+D := a[31:0]
+W[0] := b[127:96]
+W[1] := b[95:64]
+W[2] := b[63:32]
+W[3] := b[31:0]
+A[1] := f(B, C, D) + (A &lt;&lt;&lt; 5) + W[0] + K
+B[1] := A
+C[1] := B &lt;&lt;&lt; 30
+D[1] := C
+E[1] := D
+FOR i := 1 to 3
+ A[i+1] := f(B[i], C[i], D[i]) + (A[i] &lt;&lt;&lt; 5) + W[i] + E[i] + K
+ B[i+1] := A[i]
+ C[i+1] := B[i] &lt;&lt;&lt; 30
+ D[i+1] := C[i]
+ E[i+1] := D[i]
+ENDFOR
+dst[127:96] := A[4]
+dst[95:64] := B[4]
+dst[63:32] := C[4]
+dst[31:0] := D[4]
+ </operation>
+ <instruction name="SHA1RNDS4" form="xmm, xmm, imm8" xed="SHA1RNDS4_XMMi32_XMMi32_IMM8_SHA"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_mm_sha256msg1_epu32">
+ <type>Integer</type>
+ <CPUID>SHA</CPUID>
+ <category>Cryptography</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="b" etype="UI32"/>
+ <description>Perform an intermediate calculation for the next four SHA256 message values (unsigned 32-bit integers) using previous message values from "a" and "b", and store the result in "dst".</description>
+ <operation>W4 := b[31:0]
+W3 := a[127:96]
+W2 := a[95:64]
+W1 := a[63:32]
+W0 := a[31:0]
+dst[127:96] := W3 + sigma0(W4)
+dst[95:64] := W2 + sigma0(W3)
+dst[63:32] := W1 + sigma0(W2)
+dst[31:0] := W0 + sigma0(W1)
+ </operation>
+ <instruction name="SHA256MSG1" form="xmm, xmm" xed="SHA256MSG1_XMMi32_XMMi32_SHA"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_mm_sha256msg2_epu32">
+ <type>Integer</type>
+ <CPUID>SHA</CPUID>
+ <category>Cryptography</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="b" etype="UI32"/>
+ <description>Perform the final calculation for the next four SHA256 message values (unsigned 32-bit integers) using previous message values from "a" and "b", and store the result in "dst".</description>
+ <operation>W14 := b[95:64]
+W15 := b[127:96]
+W16 := a[31:0] + sigma1(W14)
+W17 := a[63:32] + sigma1(W15)
+W18 := a[95:64] + sigma1(W16)
+W19 := a[127:96] + sigma1(W17)
+dst[127:96] := W19
+dst[95:64] := W18
+dst[63:32] := W17
+dst[31:0] := W16
+ </operation>
+ <instruction name="SHA256MSG2" form="xmm, xmm" xed="SHA256MSG2_XMMi32_XMMi32_SHA"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_mm_sha256rnds2_epu32">
+ <type>Integer</type>
+ <CPUID>SHA</CPUID>
+ <category>Cryptography</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="b" etype="UI32"/>
+ <parameter type="__m128i" varname="k" etype="UI32"/>
+ <description>Perform 2 rounds of SHA256 operation using an initial SHA256 state (C,D,G,H) from "a", an initial SHA256 state (A,B,E,F) from "b", and a pre-computed sum of the next 2 round message values (unsigned 32-bit integers) and the corresponding round constants from "k", and store the updated SHA256 state (A,B,E,F) in "dst".</description>
+ <operation>A[0] := b[127:96]
+B[0] := b[95:64]
+C[0] := a[127:96]
+D[0] := a[95:64]
+E[0] := b[63:32]
+F[0] := b[31:0]
+G[0] := a[63:32]
+H[0] := a[31:0]
+W_K[0] := k[31:0]
+W_K[1] := k[63:32]
+FOR i := 0 to 1
+ A[i+1] := Ch(E[i], F[i], G[i]) + sum1(E[i]) + W_K[i] + H[i] + Maj(A[i], B[i], C[i]) + sum0(A[i])
+ B[i+1] := A[i]
+ C[i+1] := B[i]
+ D[i+1] := C[i]
+ E[i+1] := Ch(E[i], F[i], G[i]) + sum1(E[i]) + W_K[i] + H[i] + D[i]
+ F[i+1] := E[i]
+ G[i+1] := F[i]
+ H[i+1] := G[i]
+ENDFOR
+dst[127:96] := A[2]
+dst[95:64] := B[2]
+dst[63:32] := E[2]
+dst[31:0] := F[2]
+ </operation>
+ <instruction name="SHA256RNDS2" form="xmm, xmm" xed="SHA256RNDS2_XMMi32_XMMi32_SHA"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" sequence="TRUE" name="_MM_TRANSPOSE4_PS">
+ <CPUID>SSE</CPUID>
+ <category>Swizzle</category>
+ <return type="void"/>
+ <parameter type="__m128" varname="row0" etype="FP32"/>
+ <parameter type="__m128" varname="row1" etype="FP32"/>
+ <parameter type="__m128" varname="row2" etype="FP32"/>
+ <parameter type="__m128" varname="row3" etype="FP32"/>
+ <description>Macro: Transpose the 4x4 matrix formed by the 4 rows of single-precision (32-bit) floating-point elements in "row0", "row1", "row2", and "row3", and store the transposed matrix in these vectors ("row0" now contains column 0, etc.).</description>
+ <operation>
+__m128 tmp3, tmp2, tmp1, tmp0;
+tmp0 := _mm_unpacklo_ps(row0, row1);
+tmp2 := _mm_unpacklo_ps(row2, row3);
+tmp1 := _mm_unpackhi_ps(row0, row1);
+tmp3 := _mm_unpackhi_ps(row2, row3);
+row0 := _mm_movelh_ps(tmp0, tmp2);
+row1 := _mm_movehl_ps(tmp2, tmp0);
+row2 := _mm_movelh_ps(tmp1, tmp3);
+row3 := _mm_movehl_ps(tmp3, tmp1);
+ </operation>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" name="_mm_getcsr">
+ <CPUID>SSE</CPUID>
+ <category>General Support</category>
+ <return type="unsigned int" varname="dst" etype="UI32"/>
+ <parameter type="void"/>
+ <description>Get the unsigned 32-bit value of the MXCSR control and status register.</description>
+ <operation>dst[31:0] := MXCSR
+ </operation>
+ <instruction name="STMXCSR" form="m32" xed="STMXCSR_MEMd"/>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" name="_mm_setcsr">
+ <CPUID>SSE</CPUID>
+ <category>General Support</category>
+ <return type="void"/>
+ <parameter type="unsigned int" varname="a" etype="UI32"/>
+ <description>Set the MXCSR control and status register with the value in unsigned 32-bit integer "a".</description>
+ <operation>
+MXCSR := a[31:0]
+ </operation>
+ <instruction name="LDMXCSR" form="m32" xed="LDMXCSR_MEMd"/>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" name="_MM_GET_EXCEPTION_STATE">
+ <CPUID>SSE</CPUID>
+ <category>General Support</category>
+ <return type="unsigned int" varname="dst" etype="UI32"/>
+ <description>Macro: Get the exception state bits from the MXCSR control and status register. The exception state may contain any of the following flags: _MM_EXCEPT_INVALID, _MM_EXCEPT_DIV_ZERO, _MM_EXCEPT_DENORM, _MM_EXCEPT_OVERFLOW, _MM_EXCEPT_UNDERFLOW, _MM_EXCEPT_INEXACT</description>
+ <operation>dst[31:0] := MXCSR &amp; _MM_EXCEPT_MASK
+ </operation>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" name="_MM_SET_EXCEPTION_STATE">
+ <CPUID>SSE</CPUID>
+ <category>General Support</category>
+ <return type="void"/>
+ <parameter type="unsigned int" varname="a" etype="UI32"/>
+ <description>Macro: Set the exception state bits of the MXCSR control and status register to the value in unsigned 32-bit integer "a". The exception state may contain any of the following flags: _MM_EXCEPT_INVALID, _MM_EXCEPT_DIV_ZERO, _MM_EXCEPT_DENORM, _MM_EXCEPT_OVERFLOW, _MM_EXCEPT_UNDERFLOW, _MM_EXCEPT_INEXACT</description>
+ <operation>MXCSR := a[31:0] AND ~_MM_EXCEPT_MASK
+ </operation>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" name="_MM_GET_EXCEPTION_MASK">
+ <CPUID>SSE</CPUID>
+ <category>General Support</category>
+ <return type="unsigned int" varname="dst" etype="UI32"/>
+ <description>Macro: Get the exception mask bits from the MXCSR control and status register. The exception mask may contain any of the following flags: _MM_MASK_INVALID, _MM_MASK_DIV_ZERO, _MM_MASK_DENORM, _MM_MASK_OVERFLOW, _MM_MASK_UNDERFLOW, _MM_MASK_INEXACT</description>
+ <operation>dst[31:0] := MXCSR &amp; _MM_MASK_MASK
+ </operation>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" name="_MM_SET_EXCEPTION_MASK">
+ <CPUID>SSE</CPUID>
+ <category>General Support</category>
+ <return type="void"/>
+ <parameter type="unsigned int" varname="a" etype="UI32"/>
+ <description>Macro: Set the exception mask bits of the MXCSR control and status register to the value in unsigned 32-bit integer "a". The exception mask may contain any of the following flags: _MM_MASK_INVALID, _MM_MASK_DIV_ZERO, _MM_MASK_DENORM, _MM_MASK_OVERFLOW, _MM_MASK_UNDERFLOW, _MM_MASK_INEXACT</description>
+ <operation>MXCSR := a[31:0] AND ~_MM_MASK_MASK
+ </operation>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" name="_MM_GET_ROUNDING_MODE">
+ <CPUID>SSE</CPUID>
+ <category>General Support</category>
+ <return type="unsigned int" varname="dst" etype="UI32"/>
+ <description>Macro: Get the rounding mode bits from the MXCSR control and status register. The rounding mode may contain any of the following flags: _MM_ROUND_NEAREST, _MM_ROUND_DOWN, _MM_ROUND_UP, _MM_ROUND_TOWARD_ZERO</description>
+ <operation>dst[31:0] := MXCSR &amp; _MM_ROUND_MASK
+ </operation>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" name="_MM_SET_ROUNDING_MODE">
+ <CPUID>SSE</CPUID>
+ <category>General Support</category>
+ <return type="void"/>
+ <parameter type="unsigned int" varname="a" etype="UI32"/>
+ <description>Macro: Set the rounding mode bits of the MXCSR control and status register to the value in unsigned 32-bit integer "a". The rounding mode may contain any of the following flags: _MM_ROUND_NEAREST, _MM_ROUND_DOWN, _MM_ROUND_UP, _MM_ROUND_TOWARD_ZERO</description>
+ <operation>MXCSR := a[31:0] AND ~_MM_ROUND_MASK
+ </operation>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" name="_MM_GET_FLUSH_ZERO_MODE">
+ <CPUID>SSE</CPUID>
+ <category>General Support</category>
+ <return type="unsigned int" varname="dst" etype="UI32"/>
+ <description>Macro: Get the flush zero bits from the MXCSR control and status register. The flush zero may contain any of the following flags: _MM_FLUSH_ZERO_ON or _MM_FLUSH_ZERO_OFF</description>
+ <operation>dst[31:0] := MXCSR &amp; _MM_FLUSH_MASK
+ </operation>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" name="_MM_SET_FLUSH_ZERO_MODE">
+ <CPUID>SSE</CPUID>
+ <category>General Support</category>
+ <return type="void"/>
+ <parameter type="unsigned int" varname="a" etype="UI32"/>
+ <description>Macro: Set the flush zero bits of the MXCSR control and status register to the value in unsigned 32-bit integer "a". The flush zero may contain any of the following flags: _MM_FLUSH_ZERO_ON or _MM_FLUSH_ZERO_OFF</description>
+ <operation>MXCSR := a[31:0] AND ~_MM_FLUSH_MASK
+ </operation>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" name="_mm_prefetch">
+ <CPUID>SSE</CPUID>
+ <category>General Support</category>
+ <return type="void"/>
+ <parameter type="char const*" varname="p" etype="UI8"/>
+ <parameter type="int" varname="i" etype="IMM" immwidth="2"/>
+ <description>Fetch the line of data from memory that contains address "p" to a location in the cache hierarchy specified by the locality hint "i".</description>
+ <instruction name="PREFETCHNTA" form="m8" xed="PREFETCHNTA_MEMmprefetch"/>
+ <instruction name="PREFETCHT0" form="m8" xed="PREFETCHT0_MEMmprefetch"/>
+ <instruction name="PREFETCHT1" form="m8" xed="PREFETCHT1_MEMmprefetch"/>
+ <instruction name="PREFETCHT2" form="m8" xed="PREFETCHT2_MEMmprefetch"/>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" name="_mm_sfence">
+ <CPUID>SSE</CPUID>
+ <category>General Support</category>
+ <return type="void"/>
+ <parameter type="void"/>
+ <description>Perform a serializing operation on all store-to-memory instructions that were issued prior to this instruction. Guarantees that every store instruction that precedes, in program order, is globally visible before any store instruction which follows the fence in program order.</description>
+ <instruction name="SFENCE" xed="SFENCE"/>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" name="_mm_max_pi16">
+ <type>Integer</type>
+ <CPUID>SSE</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m64" varname="dst" etype="SI16"/>
+ <parameter type="__m64" varname="a" etype="SI16"/>
+ <parameter type="__m64" varname="b" etype="SI16"/>
+ <description>Compare packed signed 16-bit integers in "a" and "b", and store packed maximum values in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*16
+ dst[i+15:i] := MAX(a[i+15:i], b[i+15:i])
+ENDFOR
+ </operation>
+ <instruction name="PMAXSW" form="mm, mm" xed="PMAXSW_MMXq_MMXq"/>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" name="_m_pmaxsw">
+ <type>Integer</type>
+ <CPUID>SSE</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m64" varname="dst" etype="SI16"/>
+ <parameter type="__m64" varname="a" etype="SI16"/>
+ <parameter type="__m64" varname="b" etype="SI16"/>
+ <description>Compare packed signed 16-bit integers in "a" and "b", and store packed maximum values in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*16
+ dst[i+15:i] := MAX(a[i+15:i], b[i+15:i])
+ENDFOR
+ </operation>
+ <instruction name="PMAXSW" form="mm, mm" xed="PMAXSW_MMXq_MMXq"/>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" name="_mm_max_pu8">
+ <type>Integer</type>
+ <CPUID>SSE</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m64" varname="dst" etype="UI8"/>
+ <parameter type="__m64" varname="a" etype="UI8"/>
+ <parameter type="__m64" varname="b" etype="UI8"/>
+ <description>Compare packed unsigned 8-bit integers in "a" and "b", and store packed maximum values in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*8
+ dst[i+7:i] := MAX(a[i+7:i], b[i+7:i])
+ENDFOR
+ </operation>
+ <instruction name="PMAXUB" form="mm, mm" xed="PMAXUB_MMXq_MMXq"/>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" name="_m_pmaxub">
+ <type>Integer</type>
+ <CPUID>SSE</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m64" varname="dst" etype="UI8"/>
+ <parameter type="__m64" varname="a" etype="UI8"/>
+ <parameter type="__m64" varname="b" etype="UI8"/>
+ <description>Compare packed unsigned 8-bit integers in "a" and "b", and store packed maximum values in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*8
+ dst[i+7:i] := MAX(a[i+7:i], b[i+7:i])
+ENDFOR
+ </operation>
+ <instruction name="PMAXUB" form="mm, mm" xed="PMAXUB_MMXq_MMXq"/>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" name="_mm_min_pi16">
+ <type>Integer</type>
+ <CPUID>SSE</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m64" varname="dst" etype="SI16"/>
+ <parameter type="__m64" varname="a" etype="SI16"/>
+ <parameter type="__m64" varname="b" etype="SI16"/>
+ <description>Compare packed signed 16-bit integers in "a" and "b", and store packed minimum values in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*16
+ dst[i+15:i] := MIN(a[i+15:i], b[i+15:i])
+ENDFOR
+ </operation>
+ <instruction name="PMINSW" form="mm, mm" xed="PMINSW_MMXq_MMXq"/>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" name="_m_pminsw">
+ <type>Integer</type>
+ <CPUID>SSE</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m64" varname="dst" etype="SI16"/>
+ <parameter type="__m64" varname="a" etype="SI16"/>
+ <parameter type="__m64" varname="b" etype="SI16"/>
+ <description>Compare packed signed 16-bit integers in "a" and "b", and store packed minimum values in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*16
+ dst[i+15:i] := MIN(a[i+15:i], b[i+15:i])
+ENDFOR
+ </operation>
+ <instruction name="PMINSW" form="mm, mm" xed="PMINSW_MMXq_MMXq"/>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" name="_mm_min_pu8">
+ <type>Integer</type>
+ <CPUID>SSE</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m64" varname="dst" etype="UI8"/>
+ <parameter type="__m64" varname="a" etype="UI8"/>
+ <parameter type="__m64" varname="b" etype="UI8"/>
+ <description>Compare packed unsigned 8-bit integers in "a" and "b", and store packed minimum values in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*8
+ dst[i+7:i] := MIN(a[i+7:i], b[i+7:i])
+ENDFOR
+ </operation>
+ <instruction name="PMINUB" form="mm, mm" xed="PMINUB_MMXq_MMXq"/>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" name="_m_pminub">
+ <type>Integer</type>
+ <CPUID>SSE</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m64" varname="dst" etype="UI8"/>
+ <parameter type="__m64" varname="a" etype="UI8"/>
+ <parameter type="__m64" varname="b" etype="UI8"/>
+ <description>Compare packed unsigned 8-bit integers in "a" and "b", and store packed minimum values in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*8
+ dst[i+7:i] := MIN(a[i+7:i], b[i+7:i])
+ENDFOR
+ </operation>
+ <instruction name="PMINUB" form="mm, mm" xed="PMINUB_MMXq_MMXq"/>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" name="_mm_mulhi_pu16">
+ <type>Integer</type>
+ <CPUID>SSE</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m64" varname="dst" etype="UI16"/>
+ <parameter type="__m64" varname="a" etype="UI16"/>
+ <parameter type="__m64" varname="b" etype="UI16"/>
+ <description>Multiply the packed unsigned 16-bit integers in "a" and "b", producing intermediate 32-bit integers, and store the high 16 bits of the intermediate integers in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*16
+ tmp[31:0] := a[i+15:i] * b[i+15:i]
+ dst[i+15:i] := tmp[31:16]
+ENDFOR
+ </operation>
+ <instruction name="PMULHUW" form="mm, mm" xed="PMULHUW_MMXq_MMXq"/>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" name="_m_pmulhuw">
+ <type>Integer</type>
+ <CPUID>SSE</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m64" varname="dst" etype="UI16"/>
+ <parameter type="__m64" varname="a" etype="UI16"/>
+ <parameter type="__m64" varname="b" etype="UI16"/>
+ <description>Multiply the packed unsigned 16-bit integers in "a" and "b", producing intermediate 32-bit integers, and store the high 16 bits of the intermediate integers in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*16
+ tmp[31:0] := a[i+15:i] * b[i+15:i]
+ dst[i+15:i] := tmp[31:16]
+ENDFOR
+ </operation>
+ <instruction name="PMULHUW" form="mm, mm" xed="PMULHUW_MMXq_MMXq"/>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" name="_mm_avg_pu8">
+ <type>Integer</type>
+ <CPUID>SSE</CPUID>
+ <category>Probability/Statistics</category>
+ <return type="__m64" varname="dst" etype="UI8"/>
+ <parameter type="__m64" varname="a" etype="UI8"/>
+ <parameter type="__m64" varname="b" etype="UI8"/>
+ <description>Average packed unsigned 8-bit integers in "a" and "b", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*8
+ dst[i+7:i] := (a[i+7:i] + b[i+7:i] + 1) &gt;&gt; 1
+ENDFOR
+ </operation>
+ <instruction name="PAVGB" form="mm, mm" xed="PAVGB_MMXq_MMXq"/>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" name="_m_pavgb">
+ <type>Integer</type>
+ <CPUID>SSE</CPUID>
+ <category>Probability/Statistics</category>
+ <return type="__m64" varname="dst" etype="UI8"/>
+ <parameter type="__m64" varname="a" etype="UI8"/>
+ <parameter type="__m64" varname="b" etype="UI8"/>
+ <description>Average packed unsigned 8-bit integers in "a" and "b", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*8
+ dst[i+7:i] := (a[i+7:i] + b[i+7:i] + 1) &gt;&gt; 1
+ENDFOR
+ </operation>
+ <instruction name="PAVGB" form="mm, mm" xed="PAVGB_MMXq_MMXq"/>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" name="_mm_avg_pu16">
+ <type>Integer</type>
+ <CPUID>SSE</CPUID>
+ <category>Probability/Statistics</category>
+ <return type="__m64" varname="dst" etype="UI16"/>
+ <parameter type="__m64" varname="a" etype="UI16"/>
+ <parameter type="__m64" varname="b" etype="UI16"/>
+ <description>Average packed unsigned 16-bit integers in "a" and "b", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*16
+ dst[i+15:i] := (a[i+15:i] + b[i+15:i] + 1) &gt;&gt; 1
+ENDFOR
+ </operation>
+ <instruction name="PAVGW" form="mm, mm" xed="PAVGW_MMXq_MMXq"/>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" name="_m_pavgw">
+ <type>Integer</type>
+ <CPUID>SSE</CPUID>
+ <category>Probability/Statistics</category>
+ <return type="__m64" varname="dst" etype="UI16"/>
+ <parameter type="__m64" varname="a" etype="UI16"/>
+ <parameter type="__m64" varname="b" etype="UI16"/>
+ <description>Average packed unsigned 16-bit integers in "a" and "b", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*16
+ dst[i+15:i] := (a[i+15:i] + b[i+15:i] + 1) &gt;&gt; 1
+ENDFOR
+ </operation>
+ <instruction name="PAVGW" form="mm, mm" xed="PAVGW_MMXq_MMXq"/>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" name="_mm_sad_pu8">
+ <type>Integer</type>
+ <CPUID>SSE</CPUID>
+ <category>Arithmetic</category>
+ <category>Miscellaneous</category>
+ <return type="__m64" varname="dst" etype="UI16"/>
+ <parameter type="__m64" varname="a" etype="UI8"/>
+ <parameter type="__m64" varname="b" etype="UI8"/>
+ <description>Compute the absolute differences of packed unsigned 8-bit integers in "a" and "b", then horizontally sum the 8 differences to produce one unsigned 16-bit integer, and store this unsigned 16-bit integer in the low 16 bits of "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*8
+ tmp[i+7:i] := ABS(a[i+7:i] - b[i+7:i])
+ENDFOR
+dst[15:0] := tmp[7:0] + tmp[15:8] + tmp[23:16] + tmp[31:24] + tmp[39:32] + tmp[47:40] + tmp[55:48] + tmp[63:56]
+dst[63:16] := 0
+ </operation>
+ <instruction name="PSADBW" form="mm, mm" xed="PSADBW_MMXq_MMXq"/>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" name="_m_psadbw">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>SSE</CPUID>
+ <category>Arithmetic</category>
+ <category>Miscellaneous</category>
+ <return type="__m64" varname="dst" etype="UI16"/>
+ <parameter type="__m64" varname="a" etype="UI8"/>
+ <parameter type="__m64" varname="b" etype="UI8"/>
+ <description>Compute the absolute differences of packed unsigned 8-bit integers in "a" and "b", then horizontally sum the 8 differences to produce one unsigned 16-bit integer, and store this unsigned 16-bit integer in the low 16 bits of "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*8
+ tmp[i+7:i] := ABS(a[i+7:i] - b[i+7:i])
+ENDFOR
+dst[15:0] := tmp[7:0] + tmp[15:8] + tmp[23:16] + tmp[31:24] + tmp[39:32] + tmp[47:40] + tmp[55:48] + tmp[63:56]
+dst[63:16] := 0
+ </operation>
+ <instruction name="PSADBW" form="mm, mm" xed="PSADBW_MMXq_MMXq"/>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" vexEq="TRUE" name="_mm_cvtsi32_ss">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Convert</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="int" varname="b" etype="SI32"/>
+ <description>Convert the signed 32-bit integer "b" to a single-precision (32-bit) floating-point element, store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst".</description>
+ <operation>
+dst[31:0] := Convert_Int32_To_FP32(b[31:0])
+dst[127:32] := a[127:32]
+ </operation>
+ <instruction name="CVTSI2SS" form="xmm, r32" xed="CVTSI2SS_XMMss_GPR32d"/>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" vexEq="TRUE" name="_mm_cvt_si2ss">
+ <type>Integer</type>
+ <CPUID>SSE</CPUID>
+ <category>Convert</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="int" varname="b" etype="SI32"/>
+ <description>Convert the signed 32-bit integer "b" to a single-precision (32-bit) floating-point element, store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst".</description>
+ <operation>
+dst[31:0] := Convert_Int32_To_FP32(b[31:0])
+dst[127:32] := a[127:32]
+ </operation>
+ <instruction name="CVTSI2SS" form="xmm, r32" xed="CVTSI2SS_XMMss_GPR32d"/>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" name="_mm_cvtsi64_ss">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>SSE</CPUID>
+ <category>Convert</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__int64" varname="b" etype="SI64"/>
+ <description>Convert the signed 64-bit integer "b" to a single-precision (32-bit) floating-point element, store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst".</description>
+ <operation>
+dst[31:0] := Convert_Int64_To_FP32(b[63:0])
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="CVTSI2SS" form="xmm, r64" xed="CVTSI2SS_XMMss_GPR64q"/>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" name="_mm_cvtpi32_ps">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Convert</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m64" varname="b" etype="SI32"/>
+ <description>Convert packed 32-bit integers in "b" to packed single-precision (32-bit) floating-point elements, store the results in the lower 2 elements of "dst", and copy the upper 2 packed elements from "a" to the upper elements of "dst".</description>
+ <operation>
+dst[31:0] := Convert_Int32_To_FP32(b[31:0])
+dst[63:32] := Convert_Int32_To_FP32(b[63:32])
+dst[95:64] := a[95:64]
+dst[127:96] := a[127:96]
+ </operation>
+ <instruction name="CVTPI2PS" form="xmm, mm" xed="CVTPI2PS_XMMq_MMXq"/>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" name="_mm_cvt_pi2ps">
+ <type>Integer</type>
+ <CPUID>SSE</CPUID>
+ <category>Convert</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m64" varname="b" etype="SI32"/>
+ <description>Convert packed signed 32-bit integers in "b" to packed single-precision (32-bit) floating-point elements, store the results in the lower 2 elements of "dst", and copy the upper 2 packed elements from "a" to the upper elements of "dst".</description>
+ <operation>
+dst[31:0] := Convert_Int32_To_FP32(b[31:0])
+dst[63:32] := Convert_Int32_To_FP32(b[63:32])
+dst[95:64] := a[95:64]
+dst[127:96] := a[127:96]
+ </operation>
+ <instruction name="CVTPI2PS" form="xmm, mm" xed="CVTPI2PS_XMMq_MMXq"/>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" sequence="TRUE" name="_mm_cvtpi16_ps">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Convert</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m64" varname="a" etype="SI16"/>
+ <description>Convert packed 16-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*16
+ m := j*32
+ dst[m+31:m] := Convert_Int16_To_FP32(a[i+15:i])
+ENDFOR
+ </operation>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" sequence="TRUE" name="_mm_cvtpu16_ps">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>SSE</CPUID>
+ <category>Convert</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m64" varname="a" etype="UI16"/>
+ <description>Convert packed unsigned 16-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*16
+ m := j*32
+ dst[m+31:m] := Convert_Int16_To_FP32(a[i+15:i])
+ENDFOR
+ </operation>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" sequence="TRUE" name="_mm_cvtpi8_ps">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Convert</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m64" varname="a" etype="SI8"/>
+ <description>Convert the lower packed 8-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*8
+ m := j*32
+ dst[m+31:m] := Convert_Int8_To_FP32(a[i+7:i])
+ENDFOR
+ </operation>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" sequence="TRUE" name="_mm_cvtpu8_ps">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>SSE</CPUID>
+ <category>Convert</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m64" varname="a" etype="UI8"/>
+ <description>Convert the lower packed unsigned 8-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*8
+ m := j*32
+ dst[m+31:m] := Convert_Int8_To_FP32(a[i+7:i])
+ENDFOR
+ </operation>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" sequence="TRUE" name="_mm_cvtpi32x2_ps">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Convert</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m64" varname="a" etype="SI32"/>
+ <parameter type="__m64" varname="b" etype="SI32"/>
+	<description>Convert packed signed 32-bit integers in "a" to packed single-precision (32-bit) floating-point elements, store the results in the lower 2 elements of "dst", then convert the packed signed 32-bit integers in "b" to packed single-precision (32-bit) floating-point elements, and store the results in the upper 2 elements of "dst".</description>
+ <operation>
+dst[31:0] := Convert_Int32_To_FP32(a[31:0])
+dst[63:32] := Convert_Int32_To_FP32(a[63:32])
+dst[95:64] := Convert_Int32_To_FP32(b[31:0])
+dst[127:96] := Convert_Int32_To_FP32(b[63:32])
+ </operation>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" name="_mm_stream_pi">
+ <type>Integer</type>
+ <CPUID>SSE</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="__m64*" varname="mem_addr" etype="FP32" memwidth="64"/>
+ <parameter type="__m64" varname="a" etype="UI64"/>
+ <description>Store 64-bits of integer data from "a" into memory using a non-temporal memory hint.</description>
+ <operation>
+MEM[mem_addr+63:mem_addr] := a[63:0]
+ </operation>
+ <instruction name="MOVNTQ" form="m64, mm" xed="MOVNTQ_MEMq_MMXq"/>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" name="_mm_maskmove_si64">
+ <type>Integer</type>
+ <CPUID>SSE</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="__m64" varname="a" etype="UI8"/>
+ <parameter type="__m64" varname="mask" etype="UI8"/>
+ <parameter type="char*" varname="mem_addr" etype="UI8" memwidth="64"/>
+ <description>Conditionally store 8-bit integer elements from "a" into memory using "mask" (elements are not stored when the highest bit is not set in the corresponding element) and a non-temporal memory hint.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*8
+ IF mask[i+7]
+ MEM[mem_addr+i+7:mem_addr+i] := a[i+7:i]
+ FI
+ENDFOR
+ </operation>
+ <instruction name="MASKMOVQ" form="mm, mm" xed="MASKMOVQ_MMXq_MMXq"/>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" name="_m_maskmovq">
+ <type>Integer</type>
+ <CPUID>SSE</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="__m64" varname="a" etype="UI8"/>
+ <parameter type="__m64" varname="mask" etype="UI8"/>
+ <parameter type="char*" varname="mem_addr" etype="UI8" memwidth="64"/>
+ <description>Conditionally store 8-bit integer elements from "a" into memory using "mask" (elements are not stored when the highest bit is not set in the corresponding element).</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*8
+ IF mask[i+7]
+ MEM[mem_addr+i+7:mem_addr+i] := a[i+7:i]
+ FI
+ENDFOR
+ </operation>
+ <instruction name="MASKMOVQ" form="mm, mm" xed="MASKMOVQ_MMXq_MMXq"/>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" name="_mm_extract_pi16">
+ <type>Integer</type>
+ <CPUID>SSE</CPUID>
+ <category>Swizzle</category>
+ <return type="int" varname="dst" etype="UI16"/>
+ <parameter type="__m64" varname="a" etype="UI16"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="2"/>
+ <description>Extract a 16-bit integer from "a", selected with "imm8", and store the result in the lower element of "dst".</description>
+ <operation>
+dst[15:0] := (a[63:0] &gt;&gt; (imm8[1:0] * 16))[15:0]
+dst[31:16] := 0
+ </operation>
+ <instruction name="PEXTRW" form="r32, mm, imm8" xed="PEXTRW_GPR32_MMXq_IMMb"/>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" name="_m_pextrw">
+ <type>Integer</type>
+ <CPUID>SSE</CPUID>
+ <category>Swizzle</category>
+ <return type="int" varname="dst" etype="UI16"/>
+ <parameter type="__m64" varname="a" etype="UI16"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="2"/>
+ <description>Extract a 16-bit integer from "a", selected with "imm8", and store the result in the lower element of "dst".</description>
+ <operation>
+dst[15:0] := (a[63:0] &gt;&gt; (imm8[1:0] * 16))[15:0]
+dst[31:16] := 0
+ </operation>
+ <instruction name="PEXTRW" form="r32, mm, imm8" xed="PEXTRW_GPR32_MMXq_IMMb"/>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" name="_mm_insert_pi16">
+ <type>Integer</type>
+ <CPUID>SSE</CPUID>
+ <category>Swizzle</category>
+ <return type="__m64" varname="dst" etype="UI16"/>
+ <parameter type="__m64" varname="a" etype="UI16"/>
+ <parameter type="int" varname="i" etype="UI16"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="2"/>
+ <description>Copy "a" to "dst", and insert the 16-bit integer "i" into "dst" at the location specified by "imm8".</description>
+ <operation>
+dst[63:0] := a[63:0]
+sel := imm8[1:0]*16
+dst[sel+15:sel] := i[15:0]
+ </operation>
+ <instruction name="PINSRW" form="mm, r32, imm8" xed="PINSRW_MMXq_GPR32_IMMb"/>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" name="_m_pinsrw">
+ <type>Integer</type>
+ <CPUID>SSE</CPUID>
+ <category>Swizzle</category>
+ <return type="__m64" varname="dst" etype="UI16"/>
+ <parameter type="__m64" varname="a" etype="UI16"/>
+ <parameter type="int" varname="i" etype="UI16"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="2"/>
+ <description>Copy "a" to "dst", and insert the 16-bit integer "i" into "dst" at the location specified by "imm8".</description>
+ <operation>
+dst[63:0] := a[63:0]
+sel := imm8[1:0]*16
+dst[sel+15:sel] := i[15:0]
+ </operation>
+ <instruction name="PINSRW" form="mm, r32, imm8" xed="PINSRW_MMXq_GPR32_IMMb"/>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" name="_mm_movemask_pi8">
+ <type>Integer</type>
+ <CPUID>SSE</CPUID>
+ <category>Miscellaneous</category>
+ <return type="int" varname="dst" etype="UI8"/>
+ <parameter type="__m64" varname="a" etype="UI8"/>
+ <description>Create mask from the most significant bit of each 8-bit element in "a", and store the result in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*8
+ dst[j] := a[i+7]
+ENDFOR
+dst[MAX:8] := 0
+ </operation>
+ <instruction name="PMOVMSKB" form="r32, mm" xed="PMOVMSKB_GPR32_MMXq"/>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" name="_m_pmovmskb">
+ <type>Integer</type>
+ <CPUID>SSE</CPUID>
+ <category>Miscellaneous</category>
+ <return type="int" varname="dst" etype="UI8"/>
+ <parameter type="__m64" varname="a" etype="UI8"/>
+ <description>Create mask from the most significant bit of each 8-bit element in "a", and store the result in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*8
+ dst[j] := a[i+7]
+ENDFOR
+dst[MAX:8] := 0
+ </operation>
+ <instruction name="PMOVMSKB" form="r32, mm" xed="PMOVMSKB_GPR32_MMXq"/>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" name="_mm_shuffle_pi16">
+ <type>Integer</type>
+ <CPUID>SSE</CPUID>
+ <category>Swizzle</category>
+ <return type="__m64" varname="dst" etype="UI16"/>
+ <parameter type="__m64" varname="a" etype="UI16"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shuffle 16-bit integers in "a" using the control in "imm8", and store the results in "dst".</description>
+ <operation>
+DEFINE SELECT4(src, control) {
+ CASE(control[1:0]) OF
+ 0: tmp[15:0] := src[15:0]
+ 1: tmp[15:0] := src[31:16]
+ 2: tmp[15:0] := src[47:32]
+ 3: tmp[15:0] := src[63:48]
+ ESAC
+ RETURN tmp[15:0]
+}
+dst[15:0] := SELECT4(a[63:0], imm8[1:0])
+dst[31:16] := SELECT4(a[63:0], imm8[3:2])
+dst[47:32] := SELECT4(a[63:0], imm8[5:4])
+dst[63:48] := SELECT4(a[63:0], imm8[7:6])
+ </operation>
+ <instruction name="PSHUFW" form="mm, mm, imm8" xed="PSHUFW_MMXq_MMXq_IMMb"/>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" name="_m_pshufw">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>SSE</CPUID>
+ <category>Swizzle</category>
+ <return type="__m64" varname="dst" etype="UI16"/>
+ <parameter type="__m64" varname="a" etype="UI16"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shuffle 16-bit integers in "a" using the control in "imm8", and store the results in "dst".</description>
+ <operation>
+DEFINE SELECT4(src, control) {
+ CASE(control[1:0]) OF
+ 0: tmp[15:0] := src[15:0]
+ 1: tmp[15:0] := src[31:16]
+ 2: tmp[15:0] := src[47:32]
+ 3: tmp[15:0] := src[63:48]
+ ESAC
+ RETURN tmp[15:0]
+}
+dst[15:0] := SELECT4(a[63:0], imm8[1:0])
+dst[31:16] := SELECT4(a[63:0], imm8[3:2])
+dst[47:32] := SELECT4(a[63:0], imm8[5:4])
+dst[63:48] := SELECT4(a[63:0], imm8[7:6])
+ </operation>
+ <instruction name="PSHUFW" form="mm, mm, imm8" xed="PSHUFW_MMXq_MMXq_IMMb"/>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" vexEq="TRUE" name="_mm_add_ss">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Add the lower single-precision (32-bit) floating-point element in "a" and "b", store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst".</description>
+ <operation>
+dst[31:0] := a[31:0] + b[31:0]
+dst[127:32] := a[127:32]
+ </operation>
+ <instruction name="ADDSS" form="xmm, xmm" xed="ADDSS_XMMss_XMMss"/>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" vexEq="TRUE" name="_mm_add_ps">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Add packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ dst[i+31:i] := a[i+31:i] + b[i+31:i]
+ENDFOR
+ </operation>
+ <instruction name="ADDPS" form="xmm, xmm" xed="ADDPS_XMMps_XMMps"/>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" vexEq="TRUE" name="_mm_sub_ss">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Subtract the lower single-precision (32-bit) floating-point element in "b" from the lower single-precision (32-bit) floating-point element in "a", store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst".</description>
+ <operation>
+dst[31:0] := a[31:0] - b[31:0]
+dst[127:32] := a[127:32]
+ </operation>
+ <instruction name="SUBSS" form="xmm, xmm" xed="SUBSS_XMMss_XMMss"/>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" vexEq="TRUE" name="_mm_sub_ps">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Subtract packed single-precision (32-bit) floating-point elements in "b" from packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ dst[i+31:i] := a[i+31:i] - b[i+31:i]
+ENDFOR
+ </operation>
+ <instruction name="SUBPS" form="xmm, xmm" xed="SUBPS_XMMps_XMMps"/>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" vexEq="TRUE" name="_mm_mul_ss">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Multiply the lower single-precision (32-bit) floating-point element in "a" and "b", store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst".</description>
+ <operation>
+dst[31:0] := a[31:0] * b[31:0]
+dst[127:32] := a[127:32]
+ </operation>
+ <instruction name="MULSS" form="xmm, xmm" xed="MULSS_XMMss_XMMss"/>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" vexEq="TRUE" name="_mm_mul_ps">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Multiply packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ dst[i+31:i] := a[i+31:i] * b[i+31:i]
+ENDFOR
+ </operation>
+ <instruction name="MULPS" form="xmm, xmm" xed="MULPS_XMMps_XMMps"/>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" vexEq="TRUE" name="_mm_div_ss">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Divide the lower single-precision (32-bit) floating-point element in "a" by the lower single-precision (32-bit) floating-point element in "b", store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst".</description>
+ <operation>
+dst[31:0] := a[31:0] / b[31:0]
+dst[127:32] := a[127:32]
+ </operation>
+ <instruction name="DIVSS" form="xmm, xmm" xed="DIVSS_XMMss_XMMss"/>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" vexEq="TRUE" name="_mm_div_ps">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Divide packed single-precision (32-bit) floating-point elements in "a" by packed elements in "b", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := 32*j
+ dst[i+31:i] := a[i+31:i] / b[i+31:i]
+ENDFOR
+ </operation>
+ <instruction name="DIVPS" form="xmm, xmm" xed="DIVPS_XMMps_XMMps"/>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" vexEq="TRUE" name="_mm_sqrt_ss">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Compute the square root of the lower single-precision (32-bit) floating-point element in "a", store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst".</description>
+ <operation>
+dst[31:0] := SQRT(a[31:0])
+dst[127:32] := a[127:32]
+ </operation>
+ <instruction name="SQRTSS" form="xmm, xmm" xed="SQRTSS_XMMss_XMMss"/>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" vexEq="TRUE" name="_mm_sqrt_ps">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Compute the square root of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ dst[i+31:i] := SQRT(a[i+31:i])
+ENDFOR
+ </operation>
+ <instruction name="SQRTPS" form="xmm, xmm" xed="SQRTPS_XMMps_XMMps"/>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" vexEq="TRUE" name="_mm_rcp_ss">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Compute the approximate reciprocal of the lower single-precision (32-bit) floating-point element in "a", store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". The maximum relative error for this approximation is less than 1.5*2^-12.</description>
+ <operation>
+dst[31:0] := (1.0 / a[31:0])
+dst[127:32] := a[127:32]
+ </operation>
+ <instruction name="RCPSS" form="xmm, xmm" xed="RCPSS_XMMss_XMMss"/>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" vexEq="TRUE" name="_mm_rcp_ps">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Compute the approximate reciprocal of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". The maximum relative error for this approximation is less than 1.5*2^-12.</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ dst[i+31:i] := (1.0 / a[i+31:i])
+ENDFOR
+ </operation>
+ <instruction name="RCPPS" form="xmm, xmm" xed="RCPPS_XMMps_XMMps"/>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" vexEq="TRUE" name="_mm_rsqrt_ss">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Compute the approximate reciprocal square root of the lower single-precision (32-bit) floating-point element in "a", store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst". The maximum relative error for this approximation is less than 1.5*2^-12.</description>
+ <operation>
+dst[31:0] := (1.0 / SQRT(a[31:0]))
+dst[127:32] := a[127:32]
+ </operation>
+ <instruction name="RSQRTSS" form="xmm, xmm" xed="RSQRTSS_XMMss_XMMss"/>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" vexEq="TRUE" name="_mm_rsqrt_ps">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Compute the approximate reciprocal square root of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". The maximum relative error for this approximation is less than 1.5*2^-12.</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ dst[i+31:i] := (1.0 / SQRT(a[i+31:i]))
+ENDFOR
+ </operation>
+ <instruction name="RSQRTPS" form="xmm, xmm" xed="RSQRTPS_XMMps_XMMps"/>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" vexEq="TRUE" name="_mm_min_ss">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+	<description>Compare the lower single-precision (32-bit) floating-point elements in "a" and "b", store the minimum value in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst".</description>
+ <operation>
+dst[31:0] := MIN(a[31:0], b[31:0])
+dst[127:32] := a[127:32]
+ </operation>
+ <instruction name="MINSS" form="xmm, xmm" xed="MINSS_XMMss_XMMss"/>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" vexEq="TRUE" name="_mm_min_ps">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Compare packed single-precision (32-bit) floating-point elements in "a" and "b", and store packed minimum values in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ dst[i+31:i] := MIN(a[i+31:i], b[i+31:i])
+ENDFOR
+ </operation>
+ <instruction name="MINPS" form="xmm, xmm" xed="MINPS_XMMps_XMMps"/>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" vexEq="TRUE" name="_mm_max_ss">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+	<description>Compare the lower single-precision (32-bit) floating-point elements in "a" and "b", store the maximum value in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst".</description>
+ <operation>
+dst[31:0] := MAX(a[31:0], b[31:0])
+dst[127:32] := a[127:32]
+ </operation>
+ <instruction name="MAXSS" form="xmm, xmm" xed="MAXSS_XMMss_XMMss"/>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" vexEq="TRUE" name="_mm_max_ps">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Compare packed single-precision (32-bit) floating-point elements in "a" and "b", and store packed maximum values in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ dst[i+31:i] := MAX(a[i+31:i], b[i+31:i])
+ENDFOR
+ </operation>
+ <instruction name="MAXPS" form="xmm, xmm" xed="MAXPS_XMMps_XMMps"/>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" vexEq="TRUE" name="_mm_and_ps">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Logical</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Compute the bitwise AND of packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ dst[i+31:i] := (a[i+31:i] AND b[i+31:i])
+ENDFOR
+ </operation>
+ <instruction name="ANDPS" form="xmm, xmm" xed="ANDPS_XMMxud_XMMxud"/>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" vexEq="TRUE" name="_mm_andnot_ps">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Logical</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Compute the bitwise NOT of packed single-precision (32-bit) floating-point elements in "a" and then AND with "b", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ dst[i+31:i] := ((NOT a[i+31:i]) AND b[i+31:i])
+ENDFOR
+ </operation>
+ <instruction name="ANDNPS" form="xmm, xmm" xed="ANDNPS_XMMxud_XMMxud"/>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" vexEq="TRUE" name="_mm_or_ps">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Logical</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Compute the bitwise OR of packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ dst[i+31:i] := a[i+31:i] OR b[i+31:i]
+ENDFOR
+ </operation>
+ <instruction name="ORPS" form="xmm, xmm" xed="ORPS_XMMxud_XMMxud"/>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" vexEq="TRUE" name="_mm_xor_ps">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Logical</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Compute the bitwise XOR of packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ dst[i+31:i] := a[i+31:i] XOR b[i+31:i]
+ENDFOR
+ </operation>
+ <instruction name="XORPS" form="xmm, xmm" xed="XORPS_XMMxud_XMMxud"/>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" vexEq="TRUE" name="_mm_cmpeq_ss">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Compare</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Compare the lower single-precision (32-bit) floating-point elements in "a" and "b" for equality, store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst".</description>
+ <operation>
+dst[31:0] := ( a[31:0] == b[31:0] ) ? 0xFFFFFFFF : 0
+dst[127:32] := a[127:32]
+ </operation>
+ <instruction name="CMPSS" form="xmm, xmm, imm8" xed="CMPSS_XMMss_XMMss_IMMb"/>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" vexEq="TRUE" name="_mm_cmpeq_ps">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Compare</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Compare packed single-precision (32-bit) floating-point elements in "a" and "b" for equality, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ dst[i+31:i] := ( a[i+31:i] == b[i+31:i] ) ? 0xFFFFFFFF : 0
+ENDFOR
+ </operation>
+ <instruction name="CMPPS" form="xmm, xmm, imm8" xed="CMPPS_XMMps_XMMps_IMMb"/>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" vexEq="TRUE" name="_mm_cmplt_ss">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Compare</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Compare the lower single-precision (32-bit) floating-point elements in "a" and "b" for less-than, store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst".</description>
+ <operation>
+dst[31:0] := ( a[31:0] &lt; b[31:0] ) ? 0xFFFFFFFF : 0
+dst[127:32] := a[127:32]
+ </operation>
+ <instruction name="CMPSS" form="xmm, xmm, imm8" xed="CMPSS_XMMss_XMMss_IMMb"/>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" vexEq="TRUE" name="_mm_cmplt_ps">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Compare</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Compare packed single-precision (32-bit) floating-point elements in "a" and "b" for less-than, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ dst[i+31:i] := ( a[i+31:i] &lt; b[i+31:i] ) ? 0xFFFFFFFF : 0
+ENDFOR
+ </operation>
+ <instruction name="CMPPS" form="xmm, xmm, imm8" xed="CMPPS_XMMps_XMMps_IMMb"/>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" vexEq="TRUE" name="_mm_cmple_ss">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Compare</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Compare the lower single-precision (32-bit) floating-point elements in "a" and "b" for less-than-or-equal, store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst".</description>
+ <operation>
+dst[31:0] := ( a[31:0] &lt;= b[31:0] ) ? 0xFFFFFFFF : 0
+dst[127:32] := a[127:32]
+ </operation>
+ <instruction name="CMPSS" form="xmm, xmm, imm8" xed="CMPSS_XMMss_XMMss_IMMb"/>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" vexEq="TRUE" name="_mm_cmple_ps">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Compare</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Compare packed single-precision (32-bit) floating-point elements in "a" and "b" for less-than-or-equal, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ dst[i+31:i] := ( a[i+31:i] &lt;= b[i+31:i] ) ? 0xFFFFFFFF : 0
+ENDFOR
+ </operation>
+ <instruction name="CMPPS" form="xmm, xmm, imm8" xed="CMPPS_XMMps_XMMps_IMMb"/>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" vexEq="TRUE" name="_mm_cmpgt_ss">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Compare</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Compare the lower single-precision (32-bit) floating-point elements in "a" and "b" for greater-than, store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst".</description>
+ <operation>
+dst[31:0] := ( a[31:0] &gt; b[31:0] ) ? 0xFFFFFFFF : 0
+dst[127:32] := a[127:32]
+ </operation>
+ <instruction name="CMPSS" form="xmm, xmm, imm8" xed="CMPSS_XMMss_XMMss_IMMb"/>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" vexEq="TRUE" name="_mm_cmpgt_ps">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Compare</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Compare packed single-precision (32-bit) floating-point elements in "a" and "b" for greater-than, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ dst[i+31:i] := ( a[i+31:i] &gt; b[i+31:i] ) ? 0xFFFFFFFF : 0
+ENDFOR
+ </operation>
+ <instruction name="CMPPS" form="xmm, xmm, imm8" xed="CMPPS_XMMps_XMMps_IMMb"/>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" vexEq="TRUE" name="_mm_cmpge_ss">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Compare</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Compare the lower single-precision (32-bit) floating-point elements in "a" and "b" for greater-than-or-equal, store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst".</description>
+ <operation>
+dst[31:0] := ( a[31:0] &gt;= b[31:0] ) ? 0xFFFFFFFF : 0
+dst[127:32] := a[127:32]
+ </operation>
+ <instruction name="CMPSS" form="xmm, xmm, imm8" xed="CMPSS_XMMss_XMMss_IMMb"/>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" vexEq="TRUE" name="_mm_cmpge_ps">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Compare</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Compare packed single-precision (32-bit) floating-point elements in "a" and "b" for greater-than-or-equal, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ dst[i+31:i] := ( a[i+31:i] &gt;= b[i+31:i] ) ? 0xFFFFFFFF : 0
+ENDFOR
+ </operation>
+ <instruction name="CMPPS" form="xmm, xmm, imm8" xed="CMPPS_XMMps_XMMps_IMMb"/>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" vexEq="TRUE" name="_mm_cmpneq_ss">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Compare</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Compare the lower single-precision (32-bit) floating-point elements in "a" and "b" for not-equal, store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst".</description>
+ <operation>
+dst[31:0] := ( a[31:0] != b[31:0] ) ? 0xFFFFFFFF : 0
+dst[127:32] := a[127:32]
+ </operation>
+ <instruction name="CMPSS" form="xmm, xmm, imm8" xed="CMPSS_XMMss_XMMss_IMMb"/>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" vexEq="TRUE" name="_mm_cmpneq_ps">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Compare</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Compare packed single-precision (32-bit) floating-point elements in "a" and "b" for not-equal, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ dst[i+31:i] := ( a[i+31:i] != b[i+31:i] ) ? 0xFFFFFFFF : 0
+ENDFOR
+ </operation>
+ <instruction name="CMPPS" form="xmm, xmm, imm8" xed="CMPPS_XMMps_XMMps_IMMb"/>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" vexEq="TRUE" name="_mm_cmpnlt_ss">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Compare</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Compare the lower single-precision (32-bit) floating-point elements in "a" and "b" for not-less-than, store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst".</description>
+ <operation>
+dst[31:0] := (!( a[31:0] &lt; b[31:0] )) ? 0xFFFFFFFF : 0
+dst[127:32] := a[127:32]
+ </operation>
+ <instruction name="CMPSS" form="xmm, xmm, imm8" xed="CMPSS_XMMss_XMMss_IMMb"/>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" vexEq="TRUE" name="_mm_cmpnlt_ps">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Compare</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Compare packed single-precision (32-bit) floating-point elements in "a" and "b" for not-less-than, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ dst[i+31:i] := !( a[i+31:i] &lt; b[i+31:i] ) ? 0xFFFFFFFF : 0
+ENDFOR
+ </operation>
+ <instruction name="CMPPS" form="xmm, xmm, imm8" xed="CMPPS_XMMps_XMMps_IMMb"/>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" vexEq="TRUE" name="_mm_cmpnle_ss">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Compare</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Compare the lower single-precision (32-bit) floating-point elements in "a" and "b" for not-less-than-or-equal, store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst".</description>
+ <operation>
+dst[31:0] := (!( a[31:0] &lt;= b[31:0] )) ? 0xFFFFFFFF : 0
+dst[127:32] := a[127:32]
+ </operation>
+ <instruction name="CMPSS" form="xmm, xmm, imm8" xed="CMPSS_XMMss_XMMss_IMMb"/>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" vexEq="TRUE" name="_mm_cmpnle_ps">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Compare</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Compare packed single-precision (32-bit) floating-point elements in "a" and "b" for not-less-than-or-equal, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ dst[i+31:i] := (!( a[i+31:i] &lt;= b[i+31:i] )) ? 0xFFFFFFFF : 0
+ENDFOR
+ </operation>
+ <instruction name="CMPPS" form="xmm, xmm, imm8" xed="CMPPS_XMMps_XMMps_IMMb"/>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" vexEq="TRUE" name="_mm_cmpngt_ss">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Compare</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Compare the lower single-precision (32-bit) floating-point elements in "a" and "b" for not-greater-than, store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst".</description>
+ <operation>
+dst[31:0] := (!( a[31:0] &gt; b[31:0] )) ? 0xFFFFFFFF : 0
+dst[127:32] := a[127:32]
+ </operation>
+ <instruction name="CMPSS" form="xmm, xmm, imm8" xed="CMPSS_XMMss_XMMss_IMMb"/>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" vexEq="TRUE" name="_mm_cmpngt_ps">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Compare</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Compare packed single-precision (32-bit) floating-point elements in "a" and "b" for not-greater-than, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ dst[i+31:i] := (!( a[i+31:i] &gt; b[i+31:i] )) ? 0xFFFFFFFF : 0
+ENDFOR
+ </operation>
+ <instruction name="CMPPS" form="xmm, xmm, imm8" xed="CMPPS_XMMps_XMMps_IMMb"/>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" vexEq="TRUE" name="_mm_cmpnge_ss">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Compare</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Compare the lower single-precision (32-bit) floating-point elements in "a" and "b" for not-greater-than-or-equal, store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst".</description>
+ <operation>
+dst[31:0] := (!( a[31:0] &gt;= b[31:0] )) ? 0xFFFFFFFF : 0
+dst[127:32] := a[127:32]
+ </operation>
+ <instruction name="CMPSS" form="xmm, xmm, imm8" xed="CMPSS_XMMss_XMMss_IMMb"/>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" vexEq="TRUE" name="_mm_cmpnge_ps">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Compare</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Compare packed single-precision (32-bit) floating-point elements in "a" and "b" for not-greater-than-or-equal, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ dst[i+31:i] := (!( a[i+31:i] &gt;= b[i+31:i] )) ? 0xFFFFFFFF : 0
+ENDFOR
+ </operation>
+ <instruction name="CMPPS" form="xmm, xmm, imm8" xed="CMPPS_XMMps_XMMps_IMMb"/>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" vexEq="TRUE" name="_mm_cmpord_ss">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Compare</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Compare the lower single-precision (32-bit) floating-point elements in "a" and "b" to see if neither is NaN, store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst".</description>
+ <operation>dst[31:0] := ( a[31:0] != NaN AND b[31:0] != NaN ) ? 0xFFFFFFFF : 0
+dst[127:32] := a[127:32]
+ </operation>
+ <instruction name="CMPSS" form="xmm, xmm, imm8" xed="CMPSS_XMMss_XMMss_IMMb"/>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" vexEq="TRUE" name="_mm_cmpord_ps">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Compare</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Compare packed single-precision (32-bit) floating-point elements in "a" and "b" to see if neither is NaN, and store the results in "dst".</description>
+ <operation>FOR j := 0 to 3
+ i := j*32
+ dst[i+31:i] := ( a[i+31:i] != NaN AND b[i+31:i] != NaN ) ? 0xFFFFFFFF : 0
+ENDFOR
+ </operation>
+ <instruction name="CMPPS" form="xmm, xmm, imm8" xed="CMPPS_XMMps_XMMps_IMMb"/>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" vexEq="TRUE" name="_mm_cmpunord_ss">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Compare</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Compare the lower single-precision (32-bit) floating-point elements in "a" and "b" to see if either is NaN, store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst".</description>
+ <operation>dst[31:0] := ( a[31:0] == NaN OR b[31:0] == NaN ) ? 0xFFFFFFFF : 0
+dst[127:32] := a[127:32]
+ </operation>
+ <instruction name="CMPSS" form="xmm, xmm, imm8" xed="CMPSS_XMMss_XMMss_IMMb"/>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" vexEq="TRUE" name="_mm_cmpunord_ps">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Compare</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Compare packed single-precision (32-bit) floating-point elements in "a" and "b" to see if either is NaN, and store the results in "dst".</description>
+ <operation>FOR j := 0 to 3
+ i := j*32
+ dst[i+31:i] := ( a[i+31:i] == NaN OR b[i+31:i] == NaN ) ? 0xFFFFFFFF : 0
+ENDFOR
+ </operation>
+ <instruction name="CMPPS" form="xmm, xmm, imm8" xed="CMPPS_XMMps_XMMps_IMMb"/>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" name="_mm_comieq_ss">
+ <type>Floating Point</type>
+ <type>Flag</type>
+ <CPUID>SSE</CPUID>
+ <category>Compare</category>
+ <return type="int" varname="k" etype="UI32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Compare the lower single-precision (32-bit) floating-point element in "a" and "b" for equality, and return the boolean result (0 or 1).</description>
+ <operation>
+RETURN ( a[31:0] == b[31:0] ) ? 1 : 0
+ </operation>
+ <instruction name="COMISS" form="xmm, xmm" xed="COMISS_XMMss_XMMss"/>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" name="_mm_comilt_ss">
+ <type>Floating Point</type>
+ <type>Flag</type>
+ <CPUID>SSE</CPUID>
+ <category>Compare</category>
+ <return type="int" varname="k" etype="UI32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Compare the lower single-precision (32-bit) floating-point element in "a" and "b" for less-than, and return the boolean result (0 or 1).</description>
+ <operation>
+RETURN ( a[31:0] &lt; b[31:0] ) ? 1 : 0
+ </operation>
+ <instruction name="COMISS" form="xmm, xmm" xed="COMISS_XMMss_XMMss"/>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" name="_mm_comile_ss">
+ <type>Floating Point</type>
+ <type>Flag</type>
+ <CPUID>SSE</CPUID>
+ <category>Compare</category>
+ <return type="int" varname="k" etype="UI32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Compare the lower single-precision (32-bit) floating-point element in "a" and "b" for less-than-or-equal, and return the boolean result (0 or 1).</description>
+ <operation>
+RETURN ( a[31:0] &lt;= b[31:0] ) ? 1 : 0
+ </operation>
+ <instruction name="COMISS" form="xmm, xmm" xed="COMISS_XMMss_XMMss"/>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" name="_mm_comigt_ss">
+ <type>Floating Point</type>
+ <type>Flag</type>
+ <CPUID>SSE</CPUID>
+ <category>Compare</category>
+ <return type="int" varname="k" etype="UI32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Compare the lower single-precision (32-bit) floating-point element in "a" and "b" for greater-than, and return the boolean result (0 or 1).</description>
+ <operation>
+RETURN ( a[31:0] &gt; b[31:0] ) ? 1 : 0
+ </operation>
+ <instruction name="COMISS" form="xmm, xmm" xed="COMISS_XMMss_XMMss"/>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" name="_mm_comige_ss">
+ <type>Floating Point</type>
+ <type>Flag</type>
+ <CPUID>SSE</CPUID>
+ <category>Compare</category>
+ <return type="int" varname="k" etype="UI32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Compare the lower single-precision (32-bit) floating-point element in "a" and "b" for greater-than-or-equal, and return the boolean result (0 or 1).</description>
+ <operation>
+RETURN ( a[31:0] &gt;= b[31:0] ) ? 1 : 0
+ </operation>
+ <instruction name="COMISS" form="xmm, xmm" xed="COMISS_XMMss_XMMss"/>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" name="_mm_comineq_ss">
+ <type>Floating Point</type>
+ <type>Flag</type>
+ <CPUID>SSE</CPUID>
+ <category>Compare</category>
+ <return type="int" varname="k" etype="UI32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Compare the lower single-precision (32-bit) floating-point element in "a" and "b" for not-equal, and return the boolean result (0 or 1).</description>
+ <operation>
+RETURN ( a[31:0] != b[31:0] ) ? 1 : 0
+ </operation>
+ <instruction name="COMISS" form="xmm, xmm" xed="COMISS_XMMss_XMMss"/>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" vexEq="TRUE" name="_mm_ucomieq_ss">
+ <type>Floating Point</type>
+ <type>Flag</type>
+ <CPUID>SSE</CPUID>
+ <category>Compare</category>
+ <return type="int" varname="k" etype="UI32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Compare the lower single-precision (32-bit) floating-point element in "a" and "b" for equality, and return the boolean result (0 or 1). This instruction will not signal an exception for QNaNs.</description>
+ <operation>
+RETURN ( a[31:0] == b[31:0] ) ? 1 : 0
+ </operation>
+ <instruction name="UCOMISS" form="xmm, xmm" xed="UCOMISS_XMMss_XMMss"/>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" vexEq="TRUE" name="_mm_ucomilt_ss">
+ <type>Floating Point</type>
+ <type>Flag</type>
+ <CPUID>SSE</CPUID>
+ <category>Compare</category>
+ <return type="int" varname="k" etype="UI32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Compare the lower single-precision (32-bit) floating-point element in "a" and "b" for less-than, and return the boolean result (0 or 1). This instruction will not signal an exception for QNaNs.</description>
+ <operation>
+RETURN ( a[31:0] &lt; b[31:0] ) ? 1 : 0
+ </operation>
+ <instruction name="UCOMISS" form="xmm, xmm" xed="UCOMISS_XMMss_XMMss"/>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" vexEq="TRUE" name="_mm_ucomile_ss">
+ <type>Floating Point</type>
+ <type>Flag</type>
+ <CPUID>SSE</CPUID>
+ <category>Compare</category>
+ <return type="int" varname="k" etype="UI32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Compare the lower single-precision (32-bit) floating-point element in "a" and "b" for less-than-or-equal, and return the boolean result (0 or 1). This instruction will not signal an exception for QNaNs.</description>
+ <operation>
+RETURN ( a[31:0] &lt;= b[31:0] ) ? 1 : 0
+ </operation>
+ <instruction name="UCOMISS" form="xmm, xmm" xed="UCOMISS_XMMss_XMMss"/>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" vexEq="TRUE" name="_mm_ucomigt_ss">
+ <type>Floating Point</type>
+ <type>Flag</type>
+ <CPUID>SSE</CPUID>
+ <category>Compare</category>
+ <return type="int" varname="k" etype="UI32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Compare the lower single-precision (32-bit) floating-point element in "a" and "b" for greater-than, and return the boolean result (0 or 1). This instruction will not signal an exception for QNaNs.</description>
+ <operation>
+RETURN ( a[31:0] &gt; b[31:0] ) ? 1 : 0
+ </operation>
+ <instruction name="UCOMISS" form="xmm, xmm" xed="UCOMISS_XMMss_XMMss"/>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" vexEq="TRUE" name="_mm_ucomige_ss">
+ <type>Floating Point</type>
+ <type>Flag</type>
+ <CPUID>SSE</CPUID>
+ <category>Compare</category>
+ <return type="int" varname="k" etype="UI32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Compare the lower single-precision (32-bit) floating-point element in "a" and "b" for greater-than-or-equal, and return the boolean result (0 or 1). This instruction will not signal an exception for QNaNs.</description>
+ <operation>
+RETURN ( a[31:0] &gt;= b[31:0] ) ? 1 : 0
+ </operation>
+ <instruction name="UCOMISS" form="xmm, xmm" xed="UCOMISS_XMMss_XMMss"/>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" vexEq="TRUE" name="_mm_ucomineq_ss">
+ <type>Floating Point</type>
+ <type>Flag</type>
+ <CPUID>SSE</CPUID>
+ <category>Compare</category>
+ <return type="int" varname="k" etype="UI32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Compare the lower single-precision (32-bit) floating-point element in "a" and "b" for not-equal, and return the boolean result (0 or 1). This instruction will not signal an exception for QNaNs.</description>
+ <operation>
+RETURN ( a[31:0] != b[31:0] ) ? 1 : 0
+ </operation>
+ <instruction name="UCOMISS" form="xmm, xmm" xed="UCOMISS_XMMss_XMMss"/>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" vexEq="TRUE" name="_mm_cvtss_si32">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>SSE</CPUID>
+ <category>Convert</category>
+ <return type="int" varname="dst" etype="UI32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Convert the lower single-precision (32-bit) floating-point element in "a" to a 32-bit integer, and store the result in "dst".</description>
+ <operation>
+dst[31:0] := Convert_FP32_To_Int32(a[31:0])
+ </operation>
+ <instruction name="CVTSS2SI" form="r32, xmm" xed="CVTSS2SI_GPR32d_XMMss"/>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" vexEq="TRUE" name="_mm_cvt_ss2si">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Convert</category>
+ <return type="int" varname="dst" etype="UI32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Convert the lower single-precision (32-bit) floating-point element in "a" to a 32-bit integer, and store the result in "dst".</description>
+ <operation>
+dst[31:0] := Convert_FP32_To_Int32(a[31:0])
+ </operation>
+ <instruction name="CVTSS2SI" form="r32, xmm" xed="CVTSS2SI_GPR32d_XMMss"/>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" name="_mm_cvtss_si64">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>SSE</CPUID>
+ <category>Convert</category>
+ <return type="__int64" varname="dst" etype="UI64"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Convert the lower single-precision (32-bit) floating-point element in "a" to a 64-bit integer, and store the result in "dst".</description>
+ <operation>
+dst[63:0] := Convert_FP32_To_Int64(a[31:0])
+ </operation>
+ <instruction name="CVTSS2SI" form="r64, xmm" xed="CVTSS2SI_GPR64q_XMMss"/>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" name="_mm_cvtss_f32">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Convert</category>
+ <return type="float" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Copy the lower single-precision (32-bit) floating-point element of "a" to "dst".</description>
+ <operation>
+dst[31:0] := a[31:0]
+ </operation>
+ <instruction name="MOVSS" form="m32, xmm" xed="MOVSS_MEMss_XMMss"/>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" name="_mm_cvtps_pi32">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>SSE</CPUID>
+ <category>Convert</category>
+ <return type="__m64" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed 32-bit integers, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := 32*j
+ dst[i+31:i] := Convert_FP32_To_Int32(a[i+31:i])
+ENDFOR
+ </operation>
+ <instruction name="CVTPS2PI" form="mm, xmm" xed="CVTPS2PI_MMXq_XMMq"/>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" name="_mm_cvt_ps2pi">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Convert</category>
+ <return type="__m64" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed 32-bit integers, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := 32*j
+ dst[i+31:i] := Convert_FP32_To_Int32(a[i+31:i])
+ENDFOR
+ </operation>
+ <instruction name="CVTPS2PI" form="mm, xmm" xed="CVTPS2PI_MMXq_XMMq"/>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" vexEq="TRUE" name="_mm_cvttss_si32">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>SSE</CPUID>
+ <category>Convert</category>
+ <return type="int" varname="dst" etype="UI32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Convert the lower single-precision (32-bit) floating-point element in "a" to a 32-bit integer with truncation, and store the result in "dst".</description>
+ <operation>
+dst[31:0] := Convert_FP32_To_Int32_Truncate(a[31:0])
+ </operation>
+ <instruction name="CVTTSS2SI" form="r32, xmm" xed="CVTTSS2SI_GPR32d_XMMss"/>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" vexEq="TRUE" name="_mm_cvtt_ss2si">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Convert</category>
+ <return type="int" varname="dst" etype="UI32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Convert the lower single-precision (32-bit) floating-point element in "a" to a 32-bit integer with truncation, and store the result in "dst".</description>
+ <operation>
+dst[31:0] := Convert_FP32_To_Int32_Truncate(a[31:0])
+ </operation>
+ <instruction name="CVTTSS2SI" form="r32, xmm" xed="CVTTSS2SI_GPR32d_XMMss"/>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" name="_mm_cvttss_si64">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>SSE</CPUID>
+ <category>Convert</category>
+ <return type="__int64" varname="dst" etype="UI64"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Convert the lower single-precision (32-bit) floating-point element in "a" to a 64-bit integer with truncation, and store the result in "dst".</description>
+ <operation>
+dst[63:0] := Convert_FP32_To_Int64_Truncate(a[31:0])
+ </operation>
+ <instruction name="CVTTSS2SI" form="r64, xmm" xed="CVTTSS2SI_GPR64q_XMMss"/>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" name="_mm_cvttps_pi32">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>SSE</CPUID>
+ <category>Convert</category>
+ <return type="__m64" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed 32-bit integers with truncation, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := 32*j
+ dst[i+31:i] := Convert_FP32_To_Int32_Truncate(a[i+31:i])
+ENDFOR
+ </operation>
+ <instruction name="CVTTPS2PI" form="mm, xmm" xed="CVTTPS2PI_MMXq_XMMq"/>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" name="_mm_cvtt_ps2pi">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Convert</category>
+ <return type="__m64" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed 32-bit integers with truncation, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := 32*j
+ dst[i+31:i] := Convert_FP32_To_Int32_Truncate(a[i+31:i])
+ENDFOR
+ </operation>
+ <instruction name="CVTTPS2PI" form="mm, xmm" xed="CVTTPS2PI_MMXq_XMMq"/>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" sequence="TRUE" name="_mm_cvtps_pi16">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>SSE</CPUID>
+ <category>Convert</category>
+ <return type="__m64" varname="dst" etype="UI16"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed 16-bit integers, and store the results in "dst". Note: this intrinsic will generate 0x7FFF, rather than 0x8000, for input values between 0x7FFF and 0x7FFFFFFF.</description>
+ <operation>
+FOR j := 0 to 3
+ i := 16*j
+ k := 32*j
+ IF a[k+31:k] &gt;= FP32(0x7FFF) &amp;&amp; a[k+31:k] &lt;= FP32(0x7FFFFFFF)
+ dst[i+15:i] := 0x7FFF
+ ELSE
+ dst[i+15:i] := Convert_FP32_To_Int16(a[k+31:k])
+ FI
+ENDFOR
+ </operation>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" sequence="TRUE" name="_mm_cvtps_pi8">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>SSE</CPUID>
+ <category>Convert</category>
+ <return type="__m64" varname="dst" etype="SI8"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed 8-bit integers, and store the results in lower 4 elements of "dst". Note: this intrinsic will generate 0x7F, rather than 0x80, for input values between 0x7F and 0x7FFFFFFF.</description>
+ <operation>
+FOR j := 0 to 3
+ i := 8*j
+ k := 32*j
+ IF a[k+31:k] &gt;= FP32(0x7F) &amp;&amp; a[k+31:k] &lt;= FP32(0x7FFFFFFF)
+ dst[i+7:i] := 0x7F
+ ELSE
+ dst[i+7:i] := Convert_FP32_To_Int8(a[k+31:k])
+ FI
+ENDFOR
+ </operation>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" sequence="TRUE" name="_mm_set_ss">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Set</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="float" varname="a" etype="FP32"/>
+ <description>Copy single-precision (32-bit) floating-point element "a" to the lower element of "dst", and zero the upper 3 elements.</description>
+ <operation>
+dst[31:0] := a[31:0]
+dst[127:32] := 0
+ </operation>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" sequence="TRUE" name="_mm_set1_ps">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Set</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="float" varname="a" etype="FP32"/>
+ <description>Broadcast single-precision (32-bit) floating-point value "a" to all elements of "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ dst[i+31:i] := a[31:0]
+ENDFOR
+ </operation>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" sequence="TRUE" name="_mm_set_ps1">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Set</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="float" varname="a" etype="FP32"/>
+ <description>Broadcast single-precision (32-bit) floating-point value "a" to all elements of "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ dst[i+31:i] := a[31:0]
+ENDFOR
+ </operation>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" sequence="TRUE" name="_mm_set_ps">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Set</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="float" varname="e3" etype="FP32"/>
+ <parameter type="float" varname="e2" etype="FP32"/>
+ <parameter type="float" varname="e1" etype="FP32"/>
+ <parameter type="float" varname="e0" etype="FP32"/>
+ <description>Set packed single-precision (32-bit) floating-point elements in "dst" with the supplied values.</description>
+ <operation>
+dst[31:0] := e0
+dst[63:32] := e1
+dst[95:64] := e2
+dst[127:96] := e3
+ </operation>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" sequence="TRUE" name="_mm_setr_ps">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Set</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="float" varname="e3" etype="FP32"/>
+ <parameter type="float" varname="e2" etype="FP32"/>
+ <parameter type="float" varname="e1" etype="FP32"/>
+ <parameter type="float" varname="e0" etype="FP32"/>
+ <description>Set packed single-precision (32-bit) floating-point elements in "dst" with the supplied values in reverse order.</description>
+ <operation>
+dst[31:0] := e3
+dst[63:32] := e2
+dst[95:64] := e1
+dst[127:96] := e0
+ </operation>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" name="_mm_setzero_ps">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Set</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="void"/>
+ <description>Return vector of type __m128 with all elements set to zero.</description>
+ <operation>
+dst[MAX:0] := 0
+ </operation>
+ <instruction name="XORPS" form="xmm, xmm" xed="XORPS_XMMxud_XMMxud"/>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" vexEq="TRUE" name="_mm_loadh_pi">
+ <type>Integer</type>
+ <CPUID>SSE</CPUID>
+ <category>Load</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m64 const*" varname="mem_addr" etype="FP32" memwidth="64"/>
+ <description>Load 2 single-precision (32-bit) floating-point elements from memory into the upper 2 elements of "dst", and copy the lower 2 elements from "a" to "dst". "mem_addr" does not need to be aligned on any particular boundary.</description>
+ <operation>
+dst[31:0] := a[31:0]
+dst[63:32] := a[63:32]
+dst[95:64] := MEM[mem_addr+31:mem_addr]
+dst[127:96] := MEM[mem_addr+63:mem_addr+32]
+ </operation>
+ <instruction name="MOVHPS" form="xmm, m64" xed="MOVHPS_XMMq_MEMq"/>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" vexEq="TRUE" name="_mm_loadl_pi">
+ <type>Integer</type>
+ <CPUID>SSE</CPUID>
+ <category>Load</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m64 const*" varname="mem_addr" etype="FP32" memwidth="64"/>
+ <description>Load 2 single-precision (32-bit) floating-point elements from memory into the lower 2 elements of "dst", and copy the upper 2 elements from "a" to "dst". "mem_addr" does not need to be aligned on any particular boundary.</description>
+ <operation>
+dst[31:0] := MEM[mem_addr+31:mem_addr]
+dst[63:32] := MEM[mem_addr+63:mem_addr+32]
+dst[95:64] := a[95:64]
+dst[127:96] := a[127:96]
+ </operation>
+ <instruction name="MOVLPS" form="xmm, m64" xed="MOVLPS_XMMq_MEMq"/>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" vexEq="TRUE" name="_mm_load_ss">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Load</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="float const*" varname="mem_addr" etype="FP32" memwidth="32"/>
+ <description>Load a single-precision (32-bit) floating-point element from memory into the lower element of "dst", and zero the upper 3 elements. "mem_addr" does not need to be aligned on any particular boundary.</description>
+ <operation>
+dst[31:0] := MEM[mem_addr+31:mem_addr]
+dst[127:32] := 0
+ </operation>
+ <instruction name="MOVSS" form="xmm, m32" xed="MOVSS_XMMdq_MEMss"/>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" sequence="TRUE" name="_mm_load1_ps">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Load</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="float const*" varname="mem_addr" etype="FP32" memwidth="32"/>
+ <description>Load a single-precision (32-bit) floating-point element from memory into all elements of "dst".</description>
+ <operation>
+dst[31:0] := MEM[mem_addr+31:mem_addr]
+dst[63:32] := MEM[mem_addr+31:mem_addr]
+dst[95:64] := MEM[mem_addr+31:mem_addr]
+dst[127:96] := MEM[mem_addr+31:mem_addr]
+ </operation>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" sequence="TRUE" name="_mm_load_ps1">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Load</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="float const*" varname="mem_addr" etype="FP32" memwidth="32"/>
+ <description>Load a single-precision (32-bit) floating-point element from memory into all elements of "dst".</description>
+ <operation>
+dst[31:0] := MEM[mem_addr+31:mem_addr]
+dst[63:32] := MEM[mem_addr+31:mem_addr]
+dst[95:64] := MEM[mem_addr+31:mem_addr]
+dst[127:96] := MEM[mem_addr+31:mem_addr]
+ </operation>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" vexEq="TRUE" name="_mm_load_ps">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Load</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="float const*" varname="mem_addr" etype="FP32" memwidth="128"/>
+ <description>Load 128-bits (composed of 4 packed single-precision (32-bit) floating-point elements) from memory into "dst".
+ "mem_addr" must be aligned on a 16-byte boundary or a general-protection exception may be generated.</description>
+ <operation>
+dst[127:0] := MEM[mem_addr+127:mem_addr]
+ </operation>
+ <instruction name="MOVAPS" form="xmm, m128" xed="MOVAPS_XMMps_MEMps"/>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" vexEq="TRUE" name="_mm_loadu_ps">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Load</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="float const*" varname="mem_addr" etype="FP32" memwidth="128"/>
+ <description>Load 128-bits (composed of 4 packed single-precision (32-bit) floating-point elements) from memory into "dst".
+ "mem_addr" does not need to be aligned on any particular boundary.</description>
+ <operation>
+dst[127:0] := MEM[mem_addr+127:mem_addr]
+ </operation>
+ <instruction name="MOVUPS" form="xmm, m128" xed="MOVUPS_XMMps_MEMps"/>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" sequence="TRUE" name="_mm_loadr_ps">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Load</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="float const*" varname="mem_addr" etype="FP32" memwidth="128"/>
+ <description>Load 4 single-precision (32-bit) floating-point elements from memory into "dst" in reverse order. "mem_addr" must be aligned on a 16-byte boundary or a general-protection exception may be generated.</description>
+ <operation>
+dst[31:0] := MEM[mem_addr+127:mem_addr+96]
+dst[63:32] := MEM[mem_addr+95:mem_addr+64]
+dst[95:64] := MEM[mem_addr+63:mem_addr+32]
+dst[127:96] := MEM[mem_addr+31:mem_addr]
+ </operation>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" vexEq="TRUE" name="_mm_stream_ps">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="float*" varname="mem_addr" etype="FP32" memwidth="128"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Store 128-bits (composed of 4 packed single-precision (32-bit) floating-point elements) from "a" into memory using a non-temporal memory hint.
+ "mem_addr" must be aligned on a 16-byte boundary or a general-protection exception may be generated.</description>
+ <operation>
+MEM[mem_addr+127:mem_addr] := a[127:0]
+ </operation>
+ <instruction name="MOVNTPS" form="m128, xmm" xed="MOVNTPS_MEMdq_XMMps"/>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" vexEq="TRUE" name="_mm_storeh_pi">
+ <type>Integer</type>
+ <CPUID>SSE</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="__m64*" varname="mem_addr" etype="FP32" memwidth="64"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Store the upper 2 single-precision (32-bit) floating-point elements from "a" into memory.</description>
+ <operation>
+MEM[mem_addr+31:mem_addr] := a[95:64]
+MEM[mem_addr+63:mem_addr+32] := a[127:96]
+ </operation>
+ <instruction name="MOVHPS" form="m64, xmm" xed="MOVHPS_MEMq_XMMps"/>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" vexEq="TRUE" name="_mm_storel_pi">
+ <type>Integer</type>
+ <CPUID>SSE</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="__m64*" varname="mem_addr" etype="FP32" memwidth="64"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Store the lower 2 single-precision (32-bit) floating-point elements from "a" into memory.</description>
+ <operation>
+MEM[mem_addr+31:mem_addr] := a[31:0]
+MEM[mem_addr+63:mem_addr+32] := a[63:32]
+ </operation>
+ <instruction name="MOVLPS" form="m64, xmm" xed="MOVLPS_MEMq_XMMps"/>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" vexEq="TRUE" name="_mm_store_ss">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="float*" varname="mem_addr" etype="FP32" memwidth="32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Store the lower single-precision (32-bit) floating-point element from "a" into memory. "mem_addr" does not need to be aligned on any particular boundary.</description>
+ <operation>
+MEM[mem_addr+31:mem_addr] := a[31:0]
+ </operation>
+ <instruction name="MOVSS" form="m32, xmm" xed="MOVSS_MEMss_XMMss"/>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" sequence="TRUE" name="_mm_store1_ps">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="float*" varname="mem_addr" etype="FP32" memwidth="32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Store the lower single-precision (32-bit) floating-point element from "a" into 4 contiguous elements in memory. "mem_addr" must be aligned on a 16-byte boundary or a general-protection exception may be generated.</description>
+ <operation>
+MEM[mem_addr+31:mem_addr] := a[31:0]
+MEM[mem_addr+63:mem_addr+32] := a[31:0]
+MEM[mem_addr+95:mem_addr+64] := a[31:0]
+MEM[mem_addr+127:mem_addr+96] := a[31:0]
+ </operation>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" sequence="TRUE" name="_mm_store_ps1">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="float*" varname="mem_addr" etype="FP32" memwidth="32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Store the lower single-precision (32-bit) floating-point element from "a" into 4 contiguous elements in memory. "mem_addr" must be aligned on a 16-byte boundary or a general-protection exception may be generated.</description>
+ <operation>
+MEM[mem_addr+31:mem_addr] := a[31:0]
+MEM[mem_addr+63:mem_addr+32] := a[31:0]
+MEM[mem_addr+95:mem_addr+64] := a[31:0]
+MEM[mem_addr+127:mem_addr+96] := a[31:0]
+ </operation>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" name="_mm_store_ps">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="float*" varname="mem_addr" etype="FP32" memwidth="128"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Store 128-bits (composed of 4 packed single-precision (32-bit) floating-point elements) from "a" into memory.
+ "mem_addr" must be aligned on a 16-byte boundary or a general-protection exception may be generated.</description>
+ <operation>
+MEM[mem_addr+127:mem_addr] := a[127:0]
+ </operation>
+ <instruction name="MOVAPS" form="m128, xmm" xed="MOVAPS_MEMps_XMMps"/>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" vexEq="TRUE" name="_mm_storeu_ps">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="float*" varname="mem_addr" etype="FP32" memwidth="128"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Store 128-bits (composed of 4 packed single-precision (32-bit) floating-point elements) from "a" into memory.
+ "mem_addr" does not need to be aligned on any particular boundary.</description>
+ <operation>
+MEM[mem_addr+127:mem_addr] := a[127:0]
+ </operation>
+ <instruction name="MOVUPS" form="m128, xmm" xed="MOVUPS_MEMps_XMMps"/>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" sequence="TRUE" name="_mm_storer_ps">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="float*" varname="mem_addr" etype="FP32" memwidth="128"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Store 4 single-precision (32-bit) floating-point elements from "a" into memory in reverse order.
+ "mem_addr" must be aligned on a 16-byte boundary or a general-protection exception may be generated.</description>
+ <operation>
+MEM[mem_addr+31:mem_addr] := a[127:96]
+MEM[mem_addr+63:mem_addr+32] := a[95:64]
+MEM[mem_addr+95:mem_addr+64] := a[63:32]
+MEM[mem_addr+127:mem_addr+96] := a[31:0]
+ </operation>
+ <instruction name="MOVUPS" form="m128, xmm" xed="MOVUPS_MEMps_XMMps"/>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" vexEq="TRUE" name="_mm_move_ss">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Move</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Move the lower single-precision (32-bit) floating-point element from "b" to the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst".</description>
+ <operation>
+dst[31:0] := b[31:0]
+dst[127:32] := a[127:32]
+ </operation>
+ <instruction name="MOVSS" form="xmm, xmm" xed="MOVSS_XMMss_XMMss_0F10"/>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" vexEq="TRUE" name="_mm_shuffle_ps">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Swizzle</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="unsigned int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shuffle single-precision (32-bit) floating-point elements in "a" and "b" using the control in "imm8", and store the results in "dst".</description>
+ <operation>
+DEFINE SELECT4(src, control) {
+ CASE(control[1:0]) OF
+ 0: tmp[31:0] := src[31:0]
+ 1: tmp[31:0] := src[63:32]
+ 2: tmp[31:0] := src[95:64]
+ 3: tmp[31:0] := src[127:96]
+ ESAC
+ RETURN tmp[31:0]
+}
+dst[31:0] := SELECT4(a[127:0], imm8[1:0])
+dst[63:32] := SELECT4(a[127:0], imm8[3:2])
+dst[95:64] := SELECT4(b[127:0], imm8[5:4])
+dst[127:96] := SELECT4(b[127:0], imm8[7:6])
+ </operation>
+ <instruction name="SHUFPS" form="xmm, xmm, imm8" xed="SHUFPS_XMMps_XMMps_IMMb"/>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" vexEq="TRUE" name="_mm_unpackhi_ps">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Swizzle</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Unpack and interleave single-precision (32-bit) floating-point elements from the high half of "a" and "b", and store the results in "dst".</description>
+ <operation>
+DEFINE INTERLEAVE_HIGH_DWORDS(src1[127:0], src2[127:0]) {
+ dst[31:0] := src1[95:64]
+ dst[63:32] := src2[95:64]
+ dst[95:64] := src1[127:96]
+ dst[127:96] := src2[127:96]
+ RETURN dst[127:0]
+}
+dst[127:0] := INTERLEAVE_HIGH_DWORDS(a[127:0], b[127:0])
+ </operation>
+ <instruction name="UNPCKHPS" form="xmm, xmm" xed="UNPCKHPS_XMMps_XMMdq"/>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" vexEq="TRUE" name="_mm_unpacklo_ps">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Swizzle</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Unpack and interleave single-precision (32-bit) floating-point elements from the low half of "a" and "b", and store the results in "dst".</description>
+ <operation>
+DEFINE INTERLEAVE_DWORDS(src1[127:0], src2[127:0]) {
+ dst[31:0] := src1[31:0]
+ dst[63:32] := src2[31:0]
+ dst[95:64] := src1[63:32]
+ dst[127:96] := src2[63:32]
+ RETURN dst[127:0]
+}
+dst[127:0] := INTERLEAVE_DWORDS(a[127:0], b[127:0])
+ </operation>
+ <instruction name="UNPCKLPS" form="xmm, xmm" xed="UNPCKLPS_XMMps_XMMq"/>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" vexEq="TRUE" name="_mm_movehl_ps">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Move</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Move the upper 2 single-precision (32-bit) floating-point elements from "b" to the lower 2 elements of "dst", and copy the upper 2 elements from "a" to the upper 2 elements of "dst".</description>
+ <operation>
+dst[31:0] := b[95:64]
+dst[63:32] := b[127:96]
+dst[95:64] := a[95:64]
+dst[127:96] := a[127:96]
+ </operation>
+ <instruction name="MOVHLPS" form="xmm, xmm" xed="MOVHLPS_XMMq_XMMq"/>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" vexEq="TRUE" name="_mm_movelh_ps">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Move</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Move the lower 2 single-precision (32-bit) floating-point elements from "b" to the upper 2 elements of "dst", and copy the lower 2 elements from "a" to the lower 2 elements of "dst".</description>
+ <operation>
+dst[31:0] := a[31:0]
+dst[63:32] := a[63:32]
+dst[95:64] := b[31:0]
+dst[127:96] := b[63:32]
+ </operation>
+ <instruction name="MOVLHPS" form="xmm, xmm" xed="MOVLHPS_XMMq_XMMq"/>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" vexEq="TRUE" name="_mm_movemask_ps">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Miscellaneous</category>
+ <return type="int" varname="dst" etype="UI32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Set each bit of mask "dst" based on the most significant bit of the corresponding packed single-precision (32-bit) floating-point element in "a".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF a[i+31]
+ dst[j] := 1
+ ELSE
+ dst[j] := 0
+ FI
+ENDFOR
+dst[MAX:4] := 0
+ </operation>
+ <instruction name="MOVMSKPS" form="r32, xmm" xed="MOVMSKPS_GPR32_XMMps"/>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" name="_mm_malloc">
+ <CPUID>SSE</CPUID>
+ <category>General Support</category>
+ <return type="void*"/>
+ <parameter type="size_t" varname="size" etype="UI64"/>
+ <parameter type="size_t" varname="align" etype="UI64"/>
+ <description>Allocate "size" bytes of memory, aligned to the alignment specified in "align", and return a pointer to the allocated memory. "_mm_free" should be used to free memory that is allocated with "_mm_malloc".</description>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" name="_mm_free">
+ <CPUID>SSE</CPUID>
+ <category>General Support</category>
+ <return type="void"/>
+ <parameter type="void *" varname="mem_addr"/>
+ <description>Free aligned memory that was allocated with "_mm_malloc".</description>
+ <header>xmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" name="_mm_undefined_ps">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>General Support</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="void"/>
+ <description>Return vector of type __m128 with undefined elements.</description>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm_acos_pd">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Compute the inverse cosine of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ dst[i+63:i] := ACOS(a[i+63:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm_acos_ps">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Compute the inverse cosine of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ dst[i+31:i] := ACOS(a[i+31:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm_acosh_pd">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Compute the inverse hyperbolic cosine of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ dst[i+63:i] := ACOSH(a[i+63:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm_acosh_ps">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Compute the inverse hyperbolic cosine of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ dst[i+31:i] := ACOSH(a[i+31:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm_asin_pd">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Compute the inverse sine of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ dst[i+63:i] := ASIN(a[i+63:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm_asin_ps">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Compute the inverse sine of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ dst[i+31:i] := ASIN(a[i+31:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm_asinh_pd">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Compute the inverse hyperbolic sine of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ dst[i+63:i] := ASINH(a[i+63:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm_asinh_ps">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Compute the inverse hyperbolic sine of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ dst[i+31:i] := ASINH(a[i+31:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm_atan_pd">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Compute the inverse tangent of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ dst[i+63:i] := ATAN(a[i+63:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm_atan_ps">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Compute the inverse tangent of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ dst[i+31:i] := ATAN(a[i+31:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm_atan2_pd">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Compute the inverse tangent of packed double-precision (64-bit) floating-point elements in "a" divided by packed elements in "b", and store the results in "dst" expressed in radians.</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ dst[i+63:i] := ATAN2(a[i+63:i], b[i+63:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm_atan2_ps">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Compute the inverse tangent of packed single-precision (32-bit) floating-point elements in "a" divided by packed elements in "b", and store the results in "dst" expressed in radians.</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ dst[i+31:i] := ATAN2(a[i+31:i], b[i+31:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm_atanh_pd">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Compute the inverse hyperbolic tangent of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ dst[i+63:i] := ATANH(a[i+63:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm_atanh_ps">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Compute the inverse hyperbolic tangent of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ dst[i+31:i] := ATANH(a[i+31:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm_cbrt_pd">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Compute the cube root of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst".</description>
+ <operation>FOR j := 0 to 1
+ i := j*64
+ dst[i+63:i] := CubeRoot(a[i+63:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm_cbrt_ps">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Compute the cube root of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst".</description>
+ <operation>FOR j := 0 to 3
+ i := j*32
+ dst[i+31:i] := CubeRoot(a[i+31:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm_cdfnorm_pd">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Probability/Statistics</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Compute the cumulative distribution function of packed double-precision (64-bit) floating-point elements in "a" using the normal distribution, and store the results in "dst".</description>
+ <operation>FOR j := 0 to 1
+ i := j*64
+ dst[i+63:i] := CDFNormal(a[i+63:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm_cdfnorm_ps">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Probability/Statistics</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Compute the cumulative distribution function of packed single-precision (32-bit) floating-point elements in "a" using the normal distribution, and store the results in "dst".</description>
+ <operation>FOR j := 0 to 3
+ i := j*32
+ dst[i+31:i] := CDFNormal(a[i+31:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm_cdfnorminv_pd">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Probability/Statistics</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Compute the inverse cumulative distribution function of packed double-precision (64-bit) floating-point elements in "a" using the normal distribution, and store the results in "dst".</description>
+ <operation>FOR j := 0 to 1
+ i := j*64
+ dst[i+63:i] := InverseCDFNormal(a[i+63:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm_cdfnorminv_ps">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Probability/Statistics</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Compute the inverse cumulative distribution function of packed single-precision (32-bit) floating-point elements in "a" using the normal distribution, and store the results in "dst".</description>
+ <operation>FOR j := 0 to 3
+ i := j*32
+ dst[i+31:i] := InverseCDFNormal(a[i+31:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm_cexp_ps">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Compute the exponential value of "e" raised to the power of packed complex numbers in "a", and store the complex results in "dst". Each complex number is composed of two adjacent single-precision (32-bit) floating-point elements, which defines the complex number "complex = vec.fp32[0] + i * vec.fp32[1]".</description>
+ <operation>
+DEFINE CEXP(a[31:0], b[31:0]) {
+ result[31:0] := POW(FP32(e), a[31:0]) * COS(b[31:0])
+ result[63:32] := POW(FP32(e), a[31:0]) * SIN(b[31:0])
+ RETURN result
+}
+FOR j := 0 to 1
+ i := j*64
+ dst[i+63:i] := CEXP(a[i+31:i], a[i+63:i+32])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm_clog_ps">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Compute the natural logarithm of packed complex numbers in "a", and store the complex results in "dst". Each complex number is composed of two adjacent single-precision (32-bit) floating-point elements, which defines the complex number "complex = vec.fp32[0] + i * vec.fp32[1]".</description>
+ <operation>
+DEFINE CLOG(a[31:0], b[31:0]) {
+ result[31:0] := LOG(SQRT(POW(a, 2.0) + POW(b, 2.0)))
+ result[63:32] := ATAN2(b, a)
+ RETURN result
+}
+FOR j := 0 to 1
+ i := j*64
+ dst[i+63:i] := CLOG(a[i+31:i], a[i+63:i+32])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm_cos_pd">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Compute the cosine of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ dst[i+63:i] := COS(a[i+63:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm_cos_ps">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Compute the cosine of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ dst[i+31:i] := COS(a[i+31:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm_cosd_pd">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Compute the cosine of packed double-precision (64-bit) floating-point elements in "a" expressed in degrees, and store the results in "dst".</description>
+ <operation>FOR j := 0 to 1
+ i := j*64
+ dst[i+63:i] := COSD(a[i+63:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm_cosd_ps">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Compute the cosine of packed single-precision (32-bit) floating-point elements in "a" expressed in degrees, and store the results in "dst".</description>
+ <operation>FOR j := 0 to 3
+ i := j*32
+ dst[i+31:i] := COSD(a[i+31:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm_cosh_pd">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Compute the hyperbolic cosine of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ dst[i+63:i] := COSH(a[i+63:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm_cosh_ps">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Compute the hyperbolic cosine of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ dst[i+31:i] := COSH(a[i+31:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm_csqrt_ps">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Compute the square root of packed complex numbers in "a", and store the complex results in "dst". Each complex number is composed of two adjacent single-precision (32-bit) floating-point elements, which defines the complex number "complex = vec.fp32[0] + i * vec.fp32[1]".</description>
+ <operation>
+DEFINE CSQRT(a[31:0], b[31:0]) {
+ sign[31:0] := (b &lt; 0.0) ? -FP32(1.0) : FP32(1.0)
+ result[31:0] := SQRT((a + SQRT(POW(a, 2.0) + POW(b, 2.0))) / 2.0)
+ result[63:32] := sign * SQRT((-a + SQRT(POW(a, 2.0) + POW(b, 2.0))) / 2.0)
+ RETURN result
+}
+FOR j := 0 to 1
+ i := j*64
+ dst[i+63:i] := CSQRT(a[i+31:i], a[i+63:i+32])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm_div_epi8">
+ <type>Integer</type>
+ <CPUID>SSE</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__m128i" varname="a" etype="SI8"/>
+ <parameter type="__m128i" varname="b" etype="SI8"/>
+ <description>Divide packed signed 8-bit integers in "a" by packed elements in "b", and store the truncated results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := 8*j
+ IF b[i+7:i] == 0
+ #DE
+ FI
+ dst[i+7:i] := Truncate8(a[i+7:i] / b[i+7:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm_div_epi16">
+ <type>Integer</type>
+ <CPUID>SSE</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="SI16"/>
+ <parameter type="__m128i" varname="a" etype="SI16"/>
+ <parameter type="__m128i" varname="b" etype="SI16"/>
+ <description>Divide packed signed 16-bit integers in "a" by packed elements in "b", and store the truncated results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := 16*j
+ IF b[i+15:i] == 0
+ #DE
+ FI
+ dst[i+15:i] := Truncate16(a[i+15:i] / b[i+15:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm_div_epi32">
+ <type>Integer</type>
+ <CPUID>SSE</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="SI32"/>
+ <parameter type="__m128i" varname="a" etype="SI32"/>
+ <parameter type="__m128i" varname="b" etype="SI32"/>
+ <description>Divide packed 32-bit integers in "a" by packed elements in "b", and store the truncated results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := 32*j
+ IF b[i+31:i] == 0
+ #DE
+ FI
+ dst[i+31:i] := Truncate32(a[i+31:i] / b[i+31:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm_div_epi64">
+ <type>Integer</type>
+ <CPUID>SSE</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="SI64"/>
+ <parameter type="__m128i" varname="a" etype="SI64"/>
+ <parameter type="__m128i" varname="b" etype="SI64"/>
+ <description>Divide packed signed 64-bit integers in "a" by packed elements in "b", and store the truncated results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := 64*j
+ IF b[i+63:i] == 0
+ #DE
+ FI
+ dst[i+63:i] := Truncate64(a[i+63:i] / b[i+63:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm_div_epu8">
+ <type>Integer</type>
+ <CPUID>SSE</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
+ <parameter type="__m128i" varname="b" etype="UI8"/>
+ <description>Divide packed unsigned 8-bit integers in "a" by packed elements in "b", and store the truncated results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := 8*j
+ IF b[i+7:i] == 0
+ #DE
+ FI
+ dst[i+7:i] := Truncate8(a[i+7:i] / b[i+7:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm_div_epu16">
+ <type>Integer</type>
+ <CPUID>SSE</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <parameter type="__m128i" varname="b" etype="UI16"/>
+ <description>Divide packed unsigned 16-bit integers in "a" by packed elements in "b", and store the truncated results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := 16*j
+ IF b[i+15:i] == 0
+ #DE
+ FI
+ dst[i+15:i] := Truncate16(a[i+15:i] / b[i+15:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm_div_epu32">
+ <type>Integer</type>
+ <CPUID>SSE</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="b" etype="UI32"/>
+ <description>Divide packed unsigned 32-bit integers in "a" by packed elements in "b", and store the truncated results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := 32*j
+ IF b[i+31:i] == 0
+ #DE
+ FI
+ dst[i+31:i] := Truncate32(a[i+31:i] / b[i+31:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm_div_epu64">
+ <type>Integer</type>
+ <CPUID>SSE</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <parameter type="__m128i" varname="b" etype="UI64"/>
+ <description>Divide packed unsigned 64-bit integers in "a" by packed elements in "b", and store the truncated results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := 64*j
+ IF b[i+63:i] == 0
+ #DE
+ FI
+ dst[i+63:i] := Truncate64(a[i+63:i] / b[i+63:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm_erf_pd">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Compute the error function of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst".</description>
+ <operation>FOR j := 0 to 1
+ i := j*64
+ dst[i+63:i] := ERF(a[i+63:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm_erf_ps">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Probability/Statistics</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Compute the error function of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst".</description>
+ <operation>FOR j := 0 to 3
+ i := j*32
+ dst[i+31:i] := ERF(a[i+31:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm_erfc_pd">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Probability/Statistics</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Compute the complementary error function of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst".</description>
+ <operation>FOR j := 0 to 1
+ i := j*64
+ dst[i+63:i] := 1.0 - ERF(a[i+63:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm_erfc_ps">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Probability/Statistics</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Compute the complementary error function of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst".</description>
+ <operation>FOR j := 0 to 3
+ i := j*32
+ dst[i+31:i] := 1.0 - ERF(a[i+31:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm_erfcinv_pd">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Probability/Statistics</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Compute the inverse complementary error function of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst".</description>
+ <operation>FOR j := 0 to 1
+ i := j*64
+ dst[i+63:i] := 1.0 / (1.0 - ERF(a[i+63:i]))
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm_erfcinv_ps">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Probability/Statistics</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Compute the inverse complementary error function of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst".</description>
+ <operation>FOR j := 0 to 3
+ i := j*32
+ dst[i+31:i] := 1.0 / (1.0 - ERF(a[i+31:i]))
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm_erfinv_pd">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Probability/Statistics</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Compute the inverse error function of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst".</description>
+ <operation>FOR j := 0 to 1
+ i := j*64
+ dst[i+63:i] := 1.0 / ERF(a[i+63:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm_erfinv_ps">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Probability/Statistics</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Compute the inverse error function of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst".</description>
+ <operation>FOR j := 0 to 3
+ i := j*32
+ dst[i+31:i] := 1.0 / ERF(a[i+31:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm_exp_pd">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Compute the exponential value of "e" raised to the power of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ dst[i+63:i] := POW(e, a[i+63:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm_exp_ps">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Compute the exponential value of "e" raised to the power of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ dst[i+31:i] := POW(FP32(e), a[i+31:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm_exp10_pd">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Compute the exponential value of 10 raised to the power of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ dst[i+63:i] := POW(10.0, a[i+63:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm_exp10_ps">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Compute the exponential value of 10 raised to the power of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ dst[i+31:i] := POW(FP32(10.0), a[i+31:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm_exp2_pd">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Compute the exponential value of 2 raised to the power of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ dst[i+63:i] := POW(2.0, a[i+63:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm_exp2_ps">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Compute the exponential value of 2 raised to the power of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ dst[i+31:i] := POW(FP32(2.0), a[i+31:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm_expm1_pd">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Compute the exponential value of "e" raised to the power of packed double-precision (64-bit) floating-point elements in "a", subtract one from each element, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ dst[i+63:i] := POW(e, a[i+63:i]) - 1.0
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm_expm1_ps">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Compute the exponential value of "e" raised to the power of packed single-precision (32-bit) floating-point elements in "a", subtract one from each element, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ dst[i+31:i] := POW(FP32(e), a[i+31:i]) - 1.0
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm_hypot_pd">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Compute the length of the hypotenuse of a right triangle, with the lengths of the other two sides of the triangle stored as packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ dst[i+63:i] := SQRT(POW(a[i+63:i], 2.0) + POW(b[i+63:i], 2.0))
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm_hypot_ps">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Compute the length of the hypotenuse of a right triangle, with the lengths of the other two sides of the triangle stored as packed single-precision (32-bit) floating-point elements in "a" and "b", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ dst[i+31:i] := SQRT(POW(a[i+31:i], 2.0) + POW(b[i+31:i], 2.0))
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm_idiv_epi32">
+ <type>Integer</type>
+ <CPUID>SSE</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="b" etype="UI32"/>
+ <description>Divide packed 32-bit integers in "a" by packed elements in "b", and store the truncated results in "dst".</description>
+ <operation>FOR j := 0 to 3
+ i := 32*j
+ dst[i+31:i] := TRUNCATE(a[i+31:i] / b[i+31:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm_idivrem_epi32">
+ <type>Integer</type>
+ <CPUID>SSE</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i *" varname="mem_addr" etype="UI32" memwidth="128"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="b" etype="UI32"/>
+ <description>Divide packed 32-bit integers in "a" by packed elements in "b", store the truncated results in "dst", and store the remainders as packed 32-bit integers into memory at "mem_addr".</description>
+ <operation>FOR j := 0 to 3
+ i := 32*j
+ dst[i+31:i] := TRUNCATE(a[i+31:i] / b[i+31:i])
+ MEM[mem_addr+i+31:mem_addr+i] := REMAINDER(a[i+31:i] / b[i+31:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm_invcbrt_pd">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Compute the inverse cube root of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst".</description>
+ <operation>FOR j := 0 to 1
+ i := j*64
+ dst[i+63:i] := InvCubeRoot(a[i+63:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm_invcbrt_ps">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Compute the inverse cube root of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst".</description>
+ <operation>FOR j := 0 to 3
+ i := j*32
+ dst[i+31:i] := InvCubeRoot(a[i+31:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm_invsqrt_pd">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Compute the inverse square root of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst".</description>
+ <operation>FOR j := 0 to 1
+ i := j*64
+ dst[i+63:i] := InvSQRT(a[i+63:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm_invsqrt_ps">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Compute the inverse square root of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst".</description>
+ <operation>FOR j := 0 to 3
+ i := j*32
+ dst[i+31:i] := InvSQRT(a[i+31:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm_irem_epi32">
+ <type>Integer</type>
+ <CPUID>SSE</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="b" etype="UI32"/>
+ <description>Divide packed 32-bit integers in "a" by packed elements in "b", and store the remainders as packed 32-bit integers in "dst".</description>
+ <operation>FOR j := 0 to 3
+ i := 32*j
+ dst[i+31:i] := REMAINDER(a[i+31:i] / b[i+31:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm_log_pd">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Compute the natural logarithm of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ dst[i+63:i] := LOG(a[i+63:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm_log_ps">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Compute the natural logarithm of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ dst[i+31:i] := LOG(a[i+31:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm_log10_pd">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Compute the base-10 logarithm of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ dst[i+63:i] := LOG(a[i+63:i]) / LOG(10.0)
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm_log10_ps">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Compute the base-10 logarithm of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ dst[i+31:i] := LOG(a[i+31:i]) / LOG(10.0)
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm_log1p_pd">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Compute the natural logarithm of one plus packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ dst[i+63:i] := LOG(1.0 + a[i+63:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm_log1p_ps">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Compute the natural logarithm of one plus packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ dst[i+31:i] := LOG(1.0 + a[i+31:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm_log2_pd">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Compute the base-2 logarithm of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ dst[i+63:i] := LOG(a[i+63:i]) / LOG(2.0)
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm_log2_ps">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Compute the base-2 logarithm of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ dst[i+31:i] := LOG(a[i+31:i]) / LOG(2.0)
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm_logb_pd">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Convert the exponent of each packed double-precision (64-bit) floating-point element in "a" to a double-precision floating-point number representing the integer exponent, and store the results in "dst". This intrinsic essentially calculates "floor(log2(x))" for each element.</description>
+ <operation>FOR j := 0 to 1
+ i := j*64
+ dst[i+63:i] := ConvertExpFP64(a[i+63:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm_logb_ps">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Convert the exponent of each packed single-precision (32-bit) floating-point element in "a" to a single-precision floating-point number representing the integer exponent, and store the results in "dst". This intrinsic essentially calculates "floor(log2(x))" for each element.</description>
+ <operation>FOR j := 0 to 3
+ i := j*32
+ dst[i+31:i] := ConvertExpFP32(a[i+31:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm_pow_pd">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Compute the exponential value of packed double-precision (64-bit) floating-point elements in "a" raised by packed elements in "b", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ dst[i+63:i] := POW(a[i+63:i], b[i+63:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm_pow_ps">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Compute the exponential value of packed single-precision (32-bit) floating-point elements in "a" raised by packed elements in "b", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ dst[i+31:i] := POW(a[i+31:i], b[i+31:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm_rem_epi8">
+ <type>Integer</type>
+ <CPUID>SSE</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
+ <parameter type="__m128i" varname="b" etype="UI8"/>
+ <description>Divide packed 8-bit integers in "a" by packed elements in "b", and store the remainders as packed 8-bit integers in "dst".</description>
+ <operation>FOR j := 0 to 15
+ i := 8*j
+ dst[i+7:i] := REMAINDER(a[i+7:i] / b[i+7:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm_rem_epi16">
+ <type>Integer</type>
+ <CPUID>SSE</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <parameter type="__m128i" varname="b" etype="UI16"/>
+ <description>Divide packed 16-bit integers in "a" by packed elements in "b", and store the remainders as packed 16-bit integers in "dst".</description>
+ <operation>FOR j := 0 to 7
+ i := 16*j
+ dst[i+15:i] := REMAINDER(a[i+15:i] / b[i+15:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm_rem_epi32">
+ <type>Integer</type>
+ <CPUID>SSE</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="b" etype="UI32"/>
+ <description>Divide packed 32-bit integers in "a" by packed elements in "b", and store the remainders as packed 32-bit integers in "dst".</description>
+ <operation>FOR j := 0 to 3
+ i := 32*j
+ dst[i+31:i] := REMAINDER(a[i+31:i] / b[i+31:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm_rem_epi64">
+ <type>Integer</type>
+ <CPUID>SSE</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <parameter type="__m128i" varname="b" etype="UI64"/>
+ <description>Divide packed 64-bit integers in "a" by packed elements in "b", and store the remainders as packed 64-bit integers in "dst".</description>
+ <operation>FOR j := 0 to 1
+ i := 64*j
+ dst[i+63:i] := REMAINDER(a[i+63:i] / b[i+63:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm_rem_epu8">
+ <type>Integer</type>
+ <CPUID>SSE</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
+ <parameter type="__m128i" varname="b" etype="UI8"/>
+ <description>Divide packed unsigned 8-bit integers in "a" by packed elements in "b", and store the remainders as packed unsigned 8-bit integers in "dst".</description>
+ <operation>FOR j := 0 to 15
+ i := 8*j
+ dst[i+7:i] := REMAINDER(a[i+7:i] / b[i+7:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm_rem_epu16">
+ <type>Integer</type>
+ <CPUID>SSE</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <parameter type="__m128i" varname="b" etype="UI16"/>
+ <description>Divide packed unsigned 16-bit integers in "a" by packed elements in "b", and store the remainders as packed unsigned 16-bit integers in "dst".</description>
+ <operation>FOR j := 0 to 7
+ i := 16*j
+ dst[i+15:i] := REMAINDER(a[i+15:i] / b[i+15:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm_rem_epu32">
+ <type>Integer</type>
+ <CPUID>SSE</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="b" etype="UI32"/>
+ <description>Divide packed unsigned 32-bit integers in "a" by packed elements in "b", and store the remainders as packed unsigned 32-bit integers in "dst".</description>
+ <operation>FOR j := 0 to 3
+ i := 32*j
+ dst[i+31:i] := REMAINDER(a[i+31:i] / b[i+31:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm_rem_epu64">
+ <type>Integer</type>
+ <CPUID>SSE</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <parameter type="__m128i" varname="b" etype="UI64"/>
+ <description>Divide packed unsigned 64-bit integers in "a" by packed elements in "b", and store the remainders as packed unsigned 64-bit integers in "dst".</description>
+ <operation>FOR j := 0 to 1
+ i := 64*j
+ dst[i+63:i] := REMAINDER(a[i+63:i] / b[i+63:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm_sin_pd">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Compute the sine of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ dst[i+63:i] := SIN(a[i+63:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm_sin_ps">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Compute the sine of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ dst[i+31:i] := SIN(a[i+31:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm_sincos_pd">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d *" varname="mem_addr" etype="FP64" memwidth="128"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Compute the sine and cosine of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, store the sine in "dst", and store the cosine into memory at "mem_addr".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ dst[i+63:i] := SIN(a[i+63:i])
+ MEM[mem_addr+i+63:mem_addr+i] := COS(a[i+63:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm_sincos_ps">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128 *" varname="mem_addr" etype="FP32" memwidth="128"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Compute the sine and cosine of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, store the sine in "dst", and store the cosine into memory at "mem_addr".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ dst[i+31:i] := SIN(a[i+31:i])
+ MEM[mem_addr+i+31:mem_addr+i] := COS(a[i+31:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm_sind_pd">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Compute the sine of packed double-precision (64-bit) floating-point elements in "a" expressed in degrees, and store the results in "dst".</description>
+ <operation>FOR j := 0 to 1
+ i := j*64
+ dst[i+63:i] := SIND(a[i+63:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm_sind_ps">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Compute the sine of packed single-precision (32-bit) floating-point elements in "a" expressed in degrees, and store the results in "dst".</description>
+ <operation>FOR j := 0 to 3
+ i := j*32
+ dst[i+31:i] := SIND(a[i+31:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm_sinh_pd">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Compute the hyperbolic sine of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ dst[i+63:i] := SINH(a[i+63:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm_sinh_ps">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Compute the hyperbolic sine of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ dst[i+31:i] := SINH(a[i+31:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm_svml_ceil_pd">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Round the packed double-precision (64-bit) floating-point elements in "a" up to an integer value, and store the results as packed double-precision floating-point elements in "dst". This intrinsic may generate the "roundpd"/"vroundpd" instruction.</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ dst[i+63:i] := CEIL(a[i+63:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm_svml_ceil_ps">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Round the packed single-precision (32-bit) floating-point elements in "a" up to an integer value, and store the results as packed single-precision floating-point elements in "dst". This intrinsic may generate the "roundps"/"vroundps" instruction.</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ dst[i+31:i] := CEIL(a[i+31:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm_svml_floor_pd">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Round the packed double-precision (64-bit) floating-point elements in "a" down to an integer value, and store the results as packed double-precision floating-point elements in "dst". This intrinsic may generate the "roundpd"/"vroundpd" instruction.</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ dst[i+63:i] := FLOOR(a[i+63:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm_svml_floor_ps">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Round the packed single-precision (32-bit) floating-point elements in "a" down to an integer value, and store the results as packed single-precision floating-point elements in "dst". This intrinsic may generate the "roundps"/"vroundps" instruction.</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ dst[i+31:i] := FLOOR(a[i+31:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm_svml_round_pd">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Round the packed double-precision (64-bit) floating-point elements in "a" to the nearest integer value, and store the results as packed double-precision floating-point elements in "dst". This intrinsic may generate the "roundpd"/"vroundpd" instruction.</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ dst[i+63:i] := ROUND(a[i+63:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm_svml_round_ps">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Round the packed single-precision (32-bit) floating-point elements in "a" to the nearest integer value, and store the results as packed single-precision floating-point elements in "dst". This intrinsic may generate the "roundps"/"vroundps" instruction.</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ dst[i+31:i] := ROUND(a[i+31:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm_svml_sqrt_pd">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Compute the square root of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst". Note that this intrinsic is less efficient than "_mm_sqrt_pd".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ dst[i+63:i] := SQRT(a[i+63:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm_svml_sqrt_ps">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Compute the square root of packed single-precision (32-bit) floating-point elements in "a", and store the results in "dst". Note that this intrinsic is less efficient than "_mm_sqrt_ps".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ dst[i+31:i] := SQRT(a[i+31:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm_tan_pd">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Compute the tangent of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ dst[i+63:i] := TAN(a[i+63:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm_tan_ps">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Compute the tangent of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ dst[i+31:i] := TAN(a[i+31:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm_tand_pd">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Compute the tangent of packed double-precision (64-bit) floating-point elements in "a" expressed in degrees, and store the results in "dst".</description>
+ <operation>FOR j := 0 to 1
+ i := j*64
+ dst[i+63:i] := TAND(a[i+63:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm_tand_ps">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Compute the tangent of packed single-precision (32-bit) floating-point elements in "a" expressed in degrees, and store the results in "dst".</description>
+ <operation>FOR j := 0 to 3
+ i := j*32
+ dst[i+31:i] := TAND(a[i+31:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm_tanh_pd">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Compute the hyperbolic tangent of packed double-precision (64-bit) floating-point elements in "a" expressed in radians, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ dst[i+63:i] := TANH(a[i+63:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm_tanh_ps">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Trigonometry</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Compute the hyperbolic tangent of packed single-precision (32-bit) floating-point elements in "a" expressed in radians, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ dst[i+31:i] := TANH(a[i+31:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm_trunc_pd">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Truncate the packed double-precision (64-bit) floating-point elements in "a", and store the results as packed double-precision floating-point elements in "dst". This intrinsic may generate the "roundpd"/"vroundpd" instruction.</description>
+ <operation>FOR j := 0 to 1
+ i := j*64
+ dst[i+63:i] := TRUNCATE(a[i+63:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm_trunc_ps">
+ <type>Floating Point</type>
+ <CPUID>SSE</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Truncate the packed single-precision (32-bit) floating-point elements in "a", and store the results as packed single-precision floating-point elements in "dst". This intrinsic may generate the "roundps"/"vroundps" instruction.</description>
+ <operation>FOR j := 0 to 3
+ i := j*32
+ dst[i+31:i] := TRUNCATE(a[i+31:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm_udiv_epi32">
+ <type>Integer</type>
+ <CPUID>SSE</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="b" etype="UI32"/>
+ <description>Divide packed unsigned 32-bit integers in "a" by packed elements in "b", and store the truncated results in "dst".</description>
+ <operation>FOR j := 0 to 3
+ i := 32*j
+ dst[i+31:i] := TRUNCATE(a[i+31:i] / b[i+31:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm_udivrem_epi32">
+ <type>Integer</type>
+ <CPUID>SSE</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i *" varname="mem_addr" etype="UI32" memwidth="128"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="b" etype="UI32"/>
+ <description>Divide packed unsigned 32-bit integers in "a" by packed elements in "b", store the truncated results in "dst", and store the remainders as packed unsigned 32-bit integers into memory at "mem_addr".</description>
+ <operation>FOR j := 0 to 3
+ i := 32*j
+ dst[i+31:i] := TRUNCATE(a[i+31:i] / b[i+31:i])
+ MEM[mem_addr+i+31:mem_addr+i] := REMAINDER(a[i+31:i] / b[i+31:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SVML" sequence="TRUE" name="_mm_urem_epi32">
+ <type>Integer</type>
+ <CPUID>SSE</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="b" etype="UI32"/>
+ <description>Divide packed unsigned 32-bit integers in "a" by packed elements in "b", and store the remainders as packed unsigned 32-bit integers in "dst".</description>
+ <operation>FOR j := 0 to 3
+ i := 32*j
+ dst[i+31:i] := REMAINDER(a[i+31:i] / b[i+31:i])
+ENDFOR
+dst[MAX:128] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" sequence="TRUE" name="_mm_storeu_si16">
+ <type>Integer</type>
+ <CPUID>SSE</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="mem_addr" etype="UI16" memwidth="16"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <description>Store 16-bit integer from the first element of "a" into memory. "mem_addr" does not need to be aligned on any particular boundary.</description>
+ <operation>
+MEM[mem_addr+15:mem_addr] := a[15:0]
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" name="_mm_loadu_si64">
+ <type>Integer</type>
+ <CPUID>SSE</CPUID>
+ <category>Load</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="void const*" varname="mem_addr" etype="UI64" memwidth="64"/>
+ <description>Load unaligned 64-bit integer from memory into the first element of "dst".</description>
+ <operation>
+dst[63:0] := MEM[mem_addr+63:mem_addr]
+dst[MAX:64] := 0
+ </operation>
+ <instruction name="MOVQ" form="xmm, m64" xed="MOVQ_XMMdq_MEMq_0F6E"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" name="_mm_storeu_si64">
+ <type>Integer</type>
+ <CPUID>SSE</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="mem_addr" etype="UI64" memwidth="64"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <description>Store 64-bit integer from the first element of "a" into memory. "mem_addr" does not need to be aligned on any particular boundary.</description>
+ <operation>
+MEM[mem_addr+63:mem_addr] := a[63:0]
+ </operation>
+ <instruction name="MOVQ" form="m64, xmm" xed="MOVQ_MEMq_XMMq_0F7E"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE" sequence="TRUE" name="_mm_loadu_si16">
+ <type>Integer</type>
+ <CPUID>SSE</CPUID>
+ <category>Load</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="void const*" varname="mem_addr" etype="UI16" memwidth="16"/>
+ <description>Load unaligned 16-bit integer from memory into the first element of "dst".</description>
+ <operation>
+dst[15:0] := MEM[mem_addr+15:mem_addr]
+dst[MAX:16] := 0
+ </operation>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" name="_mm_undefined_pd">
+ <type>Floating Point</type>
+ <CPUID>SSE2</CPUID>
+ <category>General Support</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="void"/>
+ <description>Return vector of type __m128d with undefined elements.</description>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" name="_mm_undefined_si128">
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>General Support</category>
+ <return type="__m128i" varname="dst" etype="M128"/>
+ <parameter type="void"/>
+ <description>Return vector of type __m128i with undefined elements.</description>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" name="_mm_loadu_si32">
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Load</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="void const*" varname="mem_addr" etype="UI32" memwidth="32"/>
+ <description>Load unaligned 32-bit integer from memory into the first element of "dst".</description>
+ <operation>
+dst[31:0] := MEM[mem_addr+31:mem_addr]
+dst[MAX:32] := 0
+ </operation>
+ <instruction name="MOVD" form="xmm, m32" xed="MOVD_XMMdq_MEMd"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" name="_mm_storeu_si32">
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="void*" varname="mem_addr" etype="UI32" memwidth="32"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <description>Store 32-bit integer from the first element of "a" into memory. "mem_addr" does not need to be aligned on any particular boundary.</description>
+ <operation>
+MEM[mem_addr+31:mem_addr] := a[31:0]
+ </operation>
+ <instruction name="MOVD" form="m32, xmm" xed="MOVD_MEMd_XMMd"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" name="_mm_pause">
+ <CPUID>SSE2</CPUID>
+ <category>General Support</category>
+ <return type="void"/>
+ <parameter type="void"/>
+ <description>Provide a hint to the processor that the code sequence is a spin-wait loop. This can help improve the performance and power consumption of spin-wait loops.</description>
+ <instruction name="PAUSE" xed="PAUSE"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" name="_mm_clflush">
+ <CPUID>SSE2</CPUID>
+ <category>General Support</category>
+ <return type="void"/>
+ <parameter type="void const*" varname="p"/>
+ <description>Invalidate and flush the cache line that contains "p" from all levels of the cache hierarchy.</description>
+ <instruction name="CLFLUSH" form="m8" xed="CLFLUSH_MEMmprefetch"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" name="_mm_lfence">
+ <CPUID>SSE2</CPUID>
+ <category>General Support</category>
+ <return type="void"/>
+ <parameter type="void"/>
+ <description>Perform a serializing operation on all load-from-memory instructions that were issued prior to this instruction. Guarantees that every load instruction that precedes, in program order, is globally visible before any load instruction which follows the fence in program order.</description>
+ <instruction name="LFENCE" xed="LFENCE"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" name="_mm_mfence">
+ <CPUID>SSE2</CPUID>
+ <category>General Support</category>
+ <return type="void"/>
+ <parameter type="void"/>
+ <description>Perform a serializing operation on all load-from-memory and store-to-memory instructions that were issued prior to this instruction. Guarantees that every memory access that precedes, in program order, the memory fence instruction is globally visible before any memory instruction which follows the fence in program order.</description>
+ <instruction name="MFENCE" xed="MFENCE"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_add_epi8">
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
+ <parameter type="__m128i" varname="b" etype="UI8"/>
+ <description>Add packed 8-bit integers in "a" and "b", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*8
+ dst[i+7:i] := a[i+7:i] + b[i+7:i]
+ENDFOR
+ </operation>
+ <instruction name="PADDB" form="xmm, xmm" xed="PADDB_XMMdq_XMMdq"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_add_epi16">
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <parameter type="__m128i" varname="b" etype="UI16"/>
+ <description>Add packed 16-bit integers in "a" and "b", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ dst[i+15:i] := a[i+15:i] + b[i+15:i]
+ENDFOR
+ </operation>
+ <instruction name="PADDW" form="xmm, xmm" xed="PADDW_XMMdq_XMMdq"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_add_epi32">
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="b" etype="UI32"/>
+ <description>Add packed 32-bit integers in "a" and "b", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ dst[i+31:i] := a[i+31:i] + b[i+31:i]
+ENDFOR
+ </operation>
+ <instruction name="PADDD" form="xmm, xmm" xed="PADDD_XMMdq_XMMdq"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" name="_mm_add_si64">
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m64" varname="dst" etype="UI64"/>
+ <parameter type="__m64" varname="a" etype="UI64"/>
+ <parameter type="__m64" varname="b" etype="UI64"/>
+ <description>Add 64-bit integers "a" and "b", and store the result in "dst".</description>
+ <operation>
+dst[63:0] := a[63:0] + b[63:0]
+ </operation>
+ <instruction name="PADDQ" form="mm, mm" xed="PADDQ_MMXq_MMXq"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_add_epi64">
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <parameter type="__m128i" varname="b" etype="UI64"/>
+ <description>Add packed 64-bit integers in "a" and "b", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ dst[i+63:i] := a[i+63:i] + b[i+63:i]
+ENDFOR
+ </operation>
+ <instruction name="PADDQ" form="xmm, xmm" xed="PADDQ_XMMdq_XMMdq"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_adds_epi8">
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__m128i" varname="a" etype="SI8"/>
+ <parameter type="__m128i" varname="b" etype="SI8"/>
+ <description>Add packed signed 8-bit integers in "a" and "b" using saturation, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*8
+ dst[i+7:i] := Saturate8( a[i+7:i] + b[i+7:i] )
+ENDFOR
+ </operation>
+ <instruction name="PADDSB" form="xmm, xmm" xed="PADDSB_XMMdq_XMMdq"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_adds_epi16">
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__m128i" varname="a" etype="SI16"/>
+ <parameter type="__m128i" varname="b" etype="SI16"/>
+ <description>Add packed signed 16-bit integers in "a" and "b" using saturation, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ dst[i+15:i] := Saturate16( a[i+15:i] + b[i+15:i] )
+ENDFOR
+ </operation>
+ <instruction name="PADDSW" form="xmm, xmm" xed="PADDSW_XMMdq_XMMdq"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_adds_epu8">
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
+ <parameter type="__m128i" varname="b" etype="UI8"/>
+ <description>Add packed unsigned 8-bit integers in "a" and "b" using saturation, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*8
+ dst[i+7:i] := SaturateU8( a[i+7:i] + b[i+7:i] )
+ENDFOR
+ </operation>
+ <instruction name="PADDUSB" form="xmm, xmm" xed="PADDUSB_XMMdq_XMMdq"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_adds_epu16">
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <parameter type="__m128i" varname="b" etype="UI16"/>
+ <description>Add packed unsigned 16-bit integers in "a" and "b" using saturation, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ dst[i+15:i] := SaturateU16( a[i+15:i] + b[i+15:i] )
+ENDFOR
+ </operation>
+ <instruction name="PADDUSW" form="xmm, xmm" xed="PADDUSW_XMMdq_XMMdq"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_avg_epu8">
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Probability/Statistics</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
+ <parameter type="__m128i" varname="b" etype="UI8"/>
+ <description>Average packed unsigned 8-bit integers in "a" and "b", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*8
+ dst[i+7:i] := (a[i+7:i] + b[i+7:i] + 1) &gt;&gt; 1
+ENDFOR
+ </operation>
+ <instruction name="PAVGB" form="xmm, xmm" xed="PAVGB_XMMdq_XMMdq"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_avg_epu16">
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Probability/Statistics</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <parameter type="__m128i" varname="b" etype="UI16"/>
+ <description>Average packed unsigned 16-bit integers in "a" and "b", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ dst[i+15:i] := (a[i+15:i] + b[i+15:i] + 1) &gt;&gt; 1
+ENDFOR
+ </operation>
+ <instruction name="PAVGW" form="xmm, xmm" xed="PAVGW_XMMdq_XMMdq"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_madd_epi16">
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="SI32"/>
+ <parameter type="__m128i" varname="a" etype="SI16"/>
+ <parameter type="__m128i" varname="b" etype="SI16"/>
+ <description>Multiply packed signed 16-bit integers in "a" and "b", producing intermediate signed 32-bit integers. Horizontally add adjacent pairs of intermediate 32-bit integers, and pack the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ dst[i+31:i] := SignExtend32(a[i+31:i+16]*b[i+31:i+16]) + SignExtend32(a[i+15:i]*b[i+15:i])
+ENDFOR
+ </operation>
+ <instruction name="PMADDWD" form="xmm, xmm" xed="PMADDWD_XMMdq_XMMdq"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_max_epi16">
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__m128i" varname="a" etype="SI16"/>
+ <parameter type="__m128i" varname="b" etype="SI16"/>
+ <description>Compare packed signed 16-bit integers in "a" and "b", and store packed maximum values in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ dst[i+15:i] := MAX(a[i+15:i], b[i+15:i])
+ENDFOR
+ </operation>
+ <instruction name="PMAXSW" form="xmm, xmm" xed="PMAXSW_XMMdq_XMMdq"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_max_epu8">
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
+ <parameter type="__m128i" varname="b" etype="UI8"/>
+ <description>Compare packed unsigned 8-bit integers in "a" and "b", and store packed maximum values in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*8
+ dst[i+7:i] := MAX(a[i+7:i], b[i+7:i])
+ENDFOR
+ </operation>
+ <instruction name="PMAXUB" form="xmm, xmm" xed="PMAXUB_XMMdq_XMMdq"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_min_epi16">
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__m128i" varname="a" etype="SI16"/>
+ <parameter type="__m128i" varname="b" etype="SI16"/>
+ <description>Compare packed signed 16-bit integers in "a" and "b", and store packed minimum values in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ dst[i+15:i] := MIN(a[i+15:i], b[i+15:i])
+ENDFOR
+ </operation>
+ <instruction name="PMINSW" form="xmm, xmm" xed="PMINSW_XMMdq_XMMdq"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_min_epu8">
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
+ <parameter type="__m128i" varname="b" etype="UI8"/>
+ <description>Compare packed unsigned 8-bit integers in "a" and "b", and store packed minimum values in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*8
+ dst[i+7:i] := MIN(a[i+7:i], b[i+7:i])
+ENDFOR
+ </operation>
+ <instruction name="PMINUB" form="xmm, xmm" xed="PMINUB_XMMdq_XMMdq"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_mulhi_epi16">
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__m128i" varname="a" etype="SI16"/>
+ <parameter type="__m128i" varname="b" etype="SI16"/>
+ <description>Multiply the packed signed 16-bit integers in "a" and "b", producing intermediate 32-bit integers, and store the high 16 bits of the intermediate integers in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ tmp[31:0] := SignExtend32(a[i+15:i]) * SignExtend32(b[i+15:i])
+ dst[i+15:i] := tmp[31:16]
+ENDFOR
+ </operation>
+ <instruction name="PMULHW" form="xmm, xmm" xed="PMULHW_XMMdq_XMMdq"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_mulhi_epu16">
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <parameter type="__m128i" varname="b" etype="UI16"/>
+ <description>Multiply the packed unsigned 16-bit integers in "a" and "b", producing intermediate 32-bit integers, and store the high 16 bits of the intermediate integers in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ tmp[31:0] := a[i+15:i] * b[i+15:i]
+ dst[i+15:i] := tmp[31:16]
+ENDFOR
+ </operation>
+ <instruction name="PMULHUW" form="xmm, xmm" xed="PMULHUW_XMMdq_XMMdq"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_mullo_epi16">
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <parameter type="__m128i" varname="b" etype="UI16"/>
+ <description>Multiply the packed 16-bit integers in "a" and "b", producing intermediate 32-bit integers, and store the low 16 bits of the intermediate integers in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ tmp[31:0] := SignExtend32(a[i+15:i]) * SignExtend32(b[i+15:i])
+ dst[i+15:i] := tmp[15:0]
+ENDFOR
+ </operation>
+ <instruction name="PMULLW" form="xmm, xmm" xed="PMULLW_XMMdq_XMMdq"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" name="_mm_mul_su32">
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m64" varname="dst" etype="UI64"/>
+ <parameter type="__m64" varname="a" etype="UI32"/>
+ <parameter type="__m64" varname="b" etype="UI32"/>
+ <description>Multiply the low unsigned 32-bit integers from "a" and "b", and store the unsigned 64-bit result in "dst".</description>
+ <operation>
+dst[63:0] := a[31:0] * b[31:0]
+ </operation>
+ <instruction name="PMULUDQ" form="mm, mm" xed="PMULUDQ_MMXq_MMXq"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_mul_epu32">
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="b" etype="UI32"/>
+ <description>Multiply the low unsigned 32-bit integers from each packed 64-bit element in "a" and "b", and store the unsigned 64-bit results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ dst[i+63:i] := a[i+31:i] * b[i+31:i]
+ENDFOR
+ </operation>
+ <instruction name="PMULUDQ" form="xmm, xmm" xed="PMULUDQ_XMMdq_XMMdq"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_sad_epu8">
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Arithmetic</category>
+ <category>Miscellaneous</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
+ <parameter type="__m128i" varname="b" etype="UI8"/>
+ <description>Compute the absolute differences of packed unsigned 8-bit integers in "a" and "b", then horizontally sum each consecutive 8 differences to produce two unsigned 16-bit integers, and pack these unsigned 16-bit integers in the low 16 bits of 64-bit elements in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*8
+ tmp[i+7:i] := ABS(a[i+7:i] - b[i+7:i])
+ENDFOR
+FOR j := 0 to 1
+ i := j*64
+ dst[i+15:i] := tmp[i+7:i] + tmp[i+15:i+8] + tmp[i+23:i+16] + tmp[i+31:i+24] + \
+ tmp[i+39:i+32] + tmp[i+47:i+40] + tmp[i+55:i+48] + tmp[i+63:i+56]
+ dst[i+63:i+16] := 0
+ENDFOR
+ </operation>
+ <instruction name="PSADBW" form="xmm, xmm" xed="PSADBW_XMMdq_XMMdq"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_sub_epi8">
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
+ <parameter type="__m128i" varname="b" etype="UI8"/>
+ <description>Subtract packed 8-bit integers in "b" from packed 8-bit integers in "a", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*8
+ dst[i+7:i] := a[i+7:i] - b[i+7:i]
+ENDFOR
+ </operation>
+ <instruction name="PSUBB" form="xmm, xmm" xed="PSUBB_XMMdq_XMMdq"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_sub_epi16">
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <parameter type="__m128i" varname="b" etype="UI16"/>
+ <description>Subtract packed 16-bit integers in "b" from packed 16-bit integers in "a", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ dst[i+15:i] := a[i+15:i] - b[i+15:i]
+ENDFOR
+ </operation>
+ <instruction name="PSUBW" form="xmm, xmm" xed="PSUBW_XMMdq_XMMdq"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_sub_epi32">
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="b" etype="UI32"/>
+ <description>Subtract packed 32-bit integers in "b" from packed 32-bit integers in "a", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ dst[i+31:i] := a[i+31:i] - b[i+31:i]
+ENDFOR
+ </operation>
+ <instruction name="PSUBD" form="xmm, xmm" xed="PSUBD_XMMdq_XMMdq"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" name="_mm_sub_si64">
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m64" varname="dst" etype="UI64"/>
+ <parameter type="__m64" varname="a" etype="UI64"/>
+ <parameter type="__m64" varname="b" etype="UI64"/>
+ <description>Subtract 64-bit integer "b" from 64-bit integer "a", and store the result in "dst".</description>
+ <operation>
+dst[63:0] := a[63:0] - b[63:0]
+ </operation>
+ <instruction name="PSUBQ" form="mm, mm" xed="PSUBQ_MMXq_MMXq"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_sub_epi64">
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <parameter type="__m128i" varname="b" etype="UI64"/>
+ <description>Subtract packed 64-bit integers in "b" from packed 64-bit integers in "a", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ dst[i+63:i] := a[i+63:i] - b[i+63:i]
+ENDFOR
+ </operation>
+ <instruction name="PSUBQ" form="xmm, xmm" xed="PSUBQ_XMMdq_XMMdq"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_subs_epi8">
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__m128i" varname="a" etype="SI8"/>
+ <parameter type="__m128i" varname="b" etype="SI8"/>
+ <description>Subtract packed signed 8-bit integers in "b" from packed 8-bit integers in "a" using saturation, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*8
+ dst[i+7:i] := Saturate8(a[i+7:i] - b[i+7:i])
+ENDFOR
+ </operation>
+ <instruction name="PSUBSB" form="xmm, xmm" xed="PSUBSB_XMMdq_XMMdq"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_subs_epi16">
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__m128i" varname="a" etype="SI16"/>
+ <parameter type="__m128i" varname="b" etype="SI16"/>
+ <description>Subtract packed signed 16-bit integers in "b" from packed 16-bit integers in "a" using saturation, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ dst[i+15:i] := Saturate16(a[i+15:i] - b[i+15:i])
+ENDFOR
+ </operation>
+ <instruction name="PSUBSW" form="xmm, xmm" xed="PSUBSW_XMMdq_XMMdq"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_subs_epu8">
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
+ <parameter type="__m128i" varname="b" etype="UI8"/>
+ <description>Subtract packed unsigned 8-bit integers in "b" from packed unsigned 8-bit integers in "a" using saturation, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*8
+ dst[i+7:i] := SaturateU8(a[i+7:i] - b[i+7:i])
+ENDFOR
+ </operation>
+ <instruction name="PSUBUSB" form="xmm, xmm" xed="PSUBUSB_XMMdq_XMMdq"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_subs_epu16">
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <parameter type="__m128i" varname="b" etype="UI16"/>
+ <description>Subtract packed unsigned 16-bit integers in "b" from packed unsigned 16-bit integers in "a" using saturation, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ dst[i+15:i] := SaturateU16(a[i+15:i] - b[i+15:i])
+ENDFOR
+ </operation>
+ <instruction name="PSUBUSW" form="xmm, xmm" xed="PSUBUSW_XMMdq_XMMdq"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_slli_si128">
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="M128"/>
+ <parameter type="__m128i" varname="a" etype="M128"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shift "a" left by "imm8" bytes while shifting in zeros, and store the results in "dst".</description>
+ <operation>
+tmp := imm8[7:0]
+IF tmp &gt; 15
+ tmp := 16
+FI
+dst[127:0] := a[127:0] &lt;&lt; (tmp*8)
+ </operation>
+ <instruction name="PSLLDQ" form="xmm, imm8" xed="PSLLDQ_XMMdq_IMMb"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_bslli_si128">
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="M128"/>
+ <parameter type="__m128i" varname="a" etype="M128"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shift "a" left by "imm8" bytes while shifting in zeros, and store the results in "dst".</description>
+ <operation>
+tmp := imm8[7:0]
+IF tmp &gt; 15
+ tmp := 16
+FI
+dst[127:0] := a[127:0] &lt;&lt; (tmp*8)
+ </operation>
+ <instruction name="PSLLDQ" form="xmm, imm8" xed="PSLLDQ_XMMdq_IMMb"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_bsrli_si128">
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="M128"/>
+ <parameter type="__m128i" varname="a" etype="M128"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shift "a" right by "imm8" bytes while shifting in zeros, and store the results in "dst".</description>
+ <operation>
+tmp := imm8[7:0]
+IF tmp &gt; 15
+ tmp := 16
+FI
+dst[127:0] := a[127:0] &gt;&gt; (tmp*8)
+ </operation>
+ <instruction name="PSRLDQ" form="xmm, imm8" xed="PSRLDQ_XMMdq_IMMb"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_slli_epi16">
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shift packed 16-bit integers in "a" left by "imm8" while shifting in zeros, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ IF imm8[7:0] &gt; 15
+ dst[i+15:i] := 0
+ ELSE
+ dst[i+15:i] := ZeroExtend16(a[i+15:i] &lt;&lt; imm8[7:0])
+ FI
+ENDFOR
+ </operation>
+ <instruction name="PSLLW" form="xmm, imm8" xed="PSLLW_XMMdq_IMMb"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_sll_epi16">
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <parameter type="__m128i" varname="count" etype="UI16"/>
+ <description>Shift packed 16-bit integers in "a" left by "count" while shifting in zeros, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ IF count[63:0] &gt; 15
+ dst[i+15:i] := 0
+ ELSE
+ dst[i+15:i] := ZeroExtend16(a[i+15:i] &lt;&lt; count[63:0])
+ FI
+ENDFOR
+ </operation>
+ <instruction name="PSLLW" form="xmm, xmm" xed="PSLLW_XMMdq_XMMdq"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_slli_epi32">
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shift packed 32-bit integers in "a" left by "imm8" while shifting in zeros, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF imm8[7:0] &gt; 31
+ dst[i+31:i] := 0
+ ELSE
+ dst[i+31:i] := ZeroExtend32(a[i+31:i] &lt;&lt; imm8[7:0])
+ FI
+ENDFOR
+ </operation>
+ <instruction name="PSLLD" form="xmm, imm8" xed="PSLLD_XMMdq_IMMb"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_sll_epi32">
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="count" etype="UI32"/>
+ <description>Shift packed 32-bit integers in "a" left by "count" while shifting in zeros, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF count[63:0] &gt; 31
+ dst[i+31:i] := 0
+ ELSE
+ dst[i+31:i] := ZeroExtend32(a[i+31:i] &lt;&lt; count[63:0])
+ FI
+ENDFOR
+ </operation>
+ <instruction name="PSLLD" form="xmm, xmm" xed="PSLLD_XMMdq_XMMdq"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_slli_epi64">
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shift packed 64-bit integers in "a" left by "imm8" while shifting in zeros, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF imm8[7:0] &gt; 63
+ dst[i+63:i] := 0
+ ELSE
+ dst[i+63:i] := ZeroExtend64(a[i+63:i] &lt;&lt; imm8[7:0])
+ FI
+ENDFOR
+ </operation>
+ <instruction name="PSLLQ" form="xmm, imm8" xed="PSLLQ_XMMdq_IMMb"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_sll_epi64">
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <parameter type="__m128i" varname="count" etype="UI64"/>
+ <description>Shift packed 64-bit integers in "a" left by "count" while shifting in zeros, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF count[63:0] &gt; 63
+ dst[i+63:i] := 0
+ ELSE
+ dst[i+63:i] := ZeroExtend64(a[i+63:i] &lt;&lt; count[63:0])
+ FI
+ENDFOR
+ </operation>
+ <instruction name="PSLLQ" form="xmm, xmm" xed="PSLLQ_XMMdq_XMMdq"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_srai_epi16">
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__m128i" varname="a" etype="SI16"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shift packed 16-bit integers in "a" right by "imm8" while shifting in sign bits, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ IF imm8[7:0] &gt; 15
+ dst[i+15:i] := (a[i+15] ? 0xFFFF : 0x0)
+ ELSE
+ dst[i+15:i] := SignExtend16(a[i+15:i] &gt;&gt; imm8[7:0])
+ FI
+ENDFOR
+ </operation>
+ <instruction name="PSRAW" form="xmm, imm8" xed="PSRAW_XMMdq_IMMb"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_sra_epi16">
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <parameter type="__m128i" varname="count" etype="UI16"/>
+ <description>Shift packed 16-bit integers in "a" right by "count" while shifting in sign bits, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ IF count[63:0] &gt; 15
+ dst[i+15:i] := (a[i+15] ? 0xFFFF : 0x0)
+ ELSE
+ dst[i+15:i] := SignExtend16(a[i+15:i] &gt;&gt; count[63:0])
+ FI
+ENDFOR
+ </operation>
+ <instruction name="PSRAW" form="xmm, xmm" xed="PSRAW_XMMdq_XMMdq"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_srai_epi32">
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="a" etype="SI32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shift packed 32-bit integers in "a" right by "imm8" while shifting in sign bits, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF imm8[7:0] &gt; 31
+ dst[i+31:i] := (a[i+31] ? 0xFFFFFFFF : 0x0)
+ ELSE
+ dst[i+31:i] := SignExtend32(a[i+31:i] &gt;&gt; imm8[7:0])
+ FI
+ENDFOR
+ </operation>
+ <instruction name="PSRAD" form="xmm, imm8" xed="PSRAD_XMMdq_IMMb"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_sra_epi32">
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="count" etype="UI32"/>
+ <description>Shift packed 32-bit integers in "a" right by "count" while shifting in sign bits, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF count[63:0] &gt; 31
+ dst[i+31:i] := (a[i+31] ? 0xFFFFFFFF : 0x0)
+ ELSE
+ dst[i+31:i] := SignExtend32(a[i+31:i] &gt;&gt; count[63:0])
+ FI
+ENDFOR
+ </operation>
+ <instruction name="PSRAD" form="xmm, xmm" xed="PSRAD_XMMdq_XMMdq"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_srli_si128">
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="M128"/>
+ <parameter type="__m128i" varname="a" etype="M128"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shift "a" right by "imm8" bytes while shifting in zeros, and store the results in "dst".</description>
+ <operation>
+tmp := imm8[7:0]
+IF tmp &gt; 15
+ tmp := 16
+FI
+dst[127:0] := a[127:0] &gt;&gt; (tmp*8)
+ </operation>
+ <instruction name="PSRLDQ" form="xmm, imm8" xed="PSRLDQ_XMMdq_IMMb"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_srli_epi16">
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shift packed 16-bit integers in "a" right by "imm8" while shifting in zeros, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ IF imm8[7:0] &gt; 15
+ dst[i+15:i] := 0
+ ELSE
+ dst[i+15:i] := ZeroExtend16(a[i+15:i] &gt;&gt; imm8[7:0])
+ FI
+ENDFOR
+ </operation>
+ <instruction name="PSRLW" form="xmm, imm8" xed="PSRLW_XMMdq_IMMb"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_srl_epi16">
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <parameter type="__m128i" varname="count" etype="UI16"/>
+ <description>Shift packed 16-bit integers in "a" right by "count" while shifting in zeros, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ IF count[63:0] &gt; 15
+ dst[i+15:i] := 0
+ ELSE
+ dst[i+15:i] := ZeroExtend16(a[i+15:i] &gt;&gt; count[63:0])
+ FI
+ENDFOR
+ </operation>
+ <instruction name="PSRLW" form="xmm, xmm" xed="PSRLW_XMMdq_XMMdq"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_srli_epi32">
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shift packed 32-bit integers in "a" right by "imm8" while shifting in zeros, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF imm8[7:0] &gt; 31
+ dst[i+31:i] := 0
+ ELSE
+ dst[i+31:i] := ZeroExtend32(a[i+31:i] &gt;&gt; imm8[7:0])
+ FI
+ENDFOR
+ </operation>
+ <instruction name="PSRLD" form="xmm, imm8" xed="PSRLD_XMMdq_IMMb"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_srl_epi32">
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="count" etype="UI32"/>
+ <description>Shift packed 32-bit integers in "a" right by "count" while shifting in zeros, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF count[63:0] &gt; 31
+ dst[i+31:i] := 0
+ ELSE
+ dst[i+31:i] := ZeroExtend32(a[i+31:i] &gt;&gt; count[63:0])
+ FI
+ENDFOR
+ </operation>
+ <instruction name="PSRLD" form="xmm, xmm" xed="PSRLD_XMMdq_XMMdq"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_srli_epi64">
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shift packed 64-bit integers in "a" right by "imm8" while shifting in zeros, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF imm8[7:0] &gt; 63
+ dst[i+63:i] := 0
+ ELSE
+ dst[i+63:i] := ZeroExtend64(a[i+63:i] &gt;&gt; imm8[7:0])
+ FI
+ENDFOR
+ </operation>
+ <instruction name="PSRLQ" form="xmm, imm8" xed="PSRLQ_XMMdq_IMMb"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_srl_epi64">
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Shift</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <parameter type="__m128i" varname="count" etype="UI64"/>
+ <description>Shift packed 64-bit integers in "a" right by "count" while shifting in zeros, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF count[63:0] &gt; 63
+ dst[i+63:i] := 0
+ ELSE
+ dst[i+63:i] := ZeroExtend64(a[i+63:i] &gt;&gt; count[63:0])
+ FI
+ENDFOR
+ </operation>
+ <instruction name="PSRLQ" form="xmm, xmm" xed="PSRLQ_XMMdq_XMMdq"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_and_si128">
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Logical</category>
+ <return type="__m128i" varname="dst" etype="M128"/>
+ <parameter type="__m128i" varname="a" etype="M128"/>
+ <parameter type="__m128i" varname="b" etype="M128"/>
+ <description>Compute the bitwise AND of 128 bits (representing integer data) in "a" and "b", and store the result in "dst".</description>
+ <operation>
+dst[127:0] := (a[127:0] AND b[127:0])
+ </operation>
+ <instruction name="PAND" form="xmm, xmm" xed="PAND_XMMdq_XMMdq"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_andnot_si128">
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Logical</category>
+ <return type="__m128i" varname="dst" etype="M128"/>
+ <parameter type="__m128i" varname="a" etype="M128"/>
+ <parameter type="__m128i" varname="b" etype="M128"/>
+ <description>Compute the bitwise NOT of 128 bits (representing integer data) in "a" and then AND with "b", and store the result in "dst".</description>
+ <operation>
+dst[127:0] := ((NOT a[127:0]) AND b[127:0])
+ </operation>
+ <instruction name="PANDN" form="xmm, xmm" xed="PANDN_XMMdq_XMMdq"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_or_si128">
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Logical</category>
+ <return type="__m128i" varname="dst" etype="M128"/>
+ <parameter type="__m128i" varname="a" etype="M128"/>
+ <parameter type="__m128i" varname="b" etype="M128"/>
+ <description>Compute the bitwise OR of 128 bits (representing integer data) in "a" and "b", and store the result in "dst".</description>
+ <operation>
+dst[127:0] := (a[127:0] OR b[127:0])
+ </operation>
+ <instruction name="POR" form="xmm, xmm" xed="POR_XMMdq_XMMdq"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_xor_si128">
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Logical</category>
+ <return type="__m128i" varname="dst" etype="M128"/>
+ <parameter type="__m128i" varname="a" etype="M128"/>
+ <parameter type="__m128i" varname="b" etype="M128"/>
+ <description>Compute the bitwise XOR of 128 bits (representing integer data) in "a" and "b", and store the result in "dst".</description>
+ <operation>
+dst[127:0] := (a[127:0] XOR b[127:0])
+ </operation>
+ <instruction name="PXOR" form="xmm, xmm" xed="PXOR_XMMdq_XMMdq"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_cmpeq_epi8">
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Compare</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
+ <parameter type="__m128i" varname="b" etype="UI8"/>
+ <description>Compare packed 8-bit integers in "a" and "b" for equality, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*8
+ dst[i+7:i] := ( a[i+7:i] == b[i+7:i] ) ? 0xFF : 0
+ENDFOR
+ </operation>
+ <instruction name="PCMPEQB" form="xmm, xmm" xed="PCMPEQB_XMMdq_XMMdq"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_cmpeq_epi16">
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Compare</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <parameter type="__m128i" varname="b" etype="UI16"/>
+ <description>Compare packed 16-bit integers in "a" and "b" for equality, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ dst[i+15:i] := ( a[i+15:i] == b[i+15:i] ) ? 0xFFFF : 0
+ENDFOR
+ </operation>
+ <instruction name="PCMPEQW" form="xmm, xmm" xed="PCMPEQW_XMMdq_XMMdq"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_cmpeq_epi32">
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Compare</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="b" etype="UI32"/>
+ <description>Compare packed 32-bit integers in "a" and "b" for equality, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ dst[i+31:i] := ( a[i+31:i] == b[i+31:i] ) ? 0xFFFFFFFF : 0
+ENDFOR
+ </operation>
+ <instruction name="PCMPEQD" form="xmm, xmm" xed="PCMPEQD_XMMdq_XMMdq"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_cmpgt_epi8">
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Compare</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__m128i" varname="a" etype="SI8"/>
+ <parameter type="__m128i" varname="b" etype="SI8"/>
+ <description>Compare packed signed 8-bit integers in "a" and "b" for greater-than, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*8
+ dst[i+7:i] := ( a[i+7:i] &gt; b[i+7:i] ) ? 0xFF : 0
+ENDFOR
+ </operation>
+ <instruction name="PCMPGTB" form="xmm, xmm" xed="PCMPGTB_XMMdq_XMMdq"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_cmpgt_epi16">
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Compare</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__m128i" varname="a" etype="SI16"/>
+ <parameter type="__m128i" varname="b" etype="SI16"/>
+ <description>Compare packed signed 16-bit integers in "a" and "b" for greater-than, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ dst[i+15:i] := ( a[i+15:i] &gt; b[i+15:i] ) ? 0xFFFF : 0
+ENDFOR
+ </operation>
+ <instruction name="PCMPGTW" form="xmm, xmm" xed="PCMPGTW_XMMdq_XMMdq"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_cmpgt_epi32">
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Compare</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="a" etype="SI32"/>
+ <parameter type="__m128i" varname="b" etype="SI32"/>
+ <description>Compare packed signed 32-bit integers in "a" and "b" for greater-than, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ dst[i+31:i] := ( a[i+31:i] &gt; b[i+31:i] ) ? 0xFFFFFFFF : 0
+ENDFOR
+ </operation>
+ <instruction name="PCMPGTD" form="xmm, xmm" xed="PCMPGTD_XMMdq_XMMdq"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_cmplt_epi8">
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Compare</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__m128i" varname="a" etype="SI8"/>
+ <parameter type="__m128i" varname="b" etype="SI8"/>
+ <description>Compare packed signed 8-bit integers in "a" and "b" for less-than, and store the results in "dst". Note: This intrinsic emits the pcmpgtb instruction with the order of the operands switched.</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*8
+ dst[i+7:i] := ( a[i+7:i] &lt; b[i+7:i] ) ? 0xFF : 0
+ENDFOR
+ </operation>
+ <instruction name="PCMPGTB" form="xmm, xmm" xed="PCMPGTB_XMMdq_XMMdq"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_cmplt_epi16">
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Compare</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__m128i" varname="a" etype="SI16"/>
+ <parameter type="__m128i" varname="b" etype="SI16"/>
+ <description>Compare packed signed 16-bit integers in "a" and "b" for less-than, and store the results in "dst". Note: This intrinsic emits the pcmpgtw instruction with the order of the operands switched.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ dst[i+15:i] := ( a[i+15:i] &lt; b[i+15:i] ) ? 0xFFFF : 0
+ENDFOR
+ </operation>
+ <instruction name="PCMPGTW" form="xmm, xmm" xed="PCMPGTW_XMMdq_XMMdq"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_cmplt_epi32">
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Compare</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="a" etype="SI32"/>
+ <parameter type="__m128i" varname="b" etype="SI32"/>
+ <description>Compare packed signed 32-bit integers in "a" and "b" for less-than, and store the results in "dst". Note: This intrinsic emits the pcmpgtd instruction with the order of the operands switched.</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ dst[i+31:i] := ( a[i+31:i] &lt; b[i+31:i] ) ? 0xFFFFFFFF : 0
+ENDFOR
+ </operation>
+ <instruction name="PCMPGTD" form="xmm, xmm" xed="PCMPGTD_XMMdq_XMMdq"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_cvtepi32_pd">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Convert</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128i" varname="a" etype="SI32"/>
+ <description>Convert packed signed 32-bit integers in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*32
+ m := j*64
+ dst[m+63:m] := Convert_Int32_To_FP64(a[i+31:i])
+ENDFOR
+ </operation>
+ <instruction name="CVTDQ2PD" form="xmm, xmm" xed="CVTDQ2PD_XMMpd_XMMq"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_cvtsi32_sd">
+ <type>Floating Point</type>
+ <CPUID>SSE2</CPUID>
+ <category>Convert</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="int" varname="b" etype="SI32"/>
+ <description>Convert the signed 32-bit integer "b" to a double-precision (64-bit) floating-point element, store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst".</description>
+ <operation>
+dst[63:0] := Convert_Int32_To_FP64(b[31:0])
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="CVTSI2SD" form="xmm, r32" xed="CVTSI2SD_XMMsd_GPR32d"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_cvtsi64_sd">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Convert</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__int64" varname="b" etype="SI64"/>
+ <description>Convert the signed 64-bit integer "b" to a double-precision (64-bit) floating-point element, store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst".</description>
+ <operation>
+dst[63:0] := Convert_Int64_To_FP64(b[63:0])
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="CVTSI2SD" form="xmm, r64" xed="CVTSI2SD_XMMsd_GPR64q"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_cvtsi64x_sd">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Convert</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__int64" varname="b" etype="SI64"/>
+ <description>Convert the signed 64-bit integer "b" to a double-precision (64-bit) floating-point element, store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst".</description>
+ <operation>
+dst[63:0] := Convert_Int64_To_FP64(b[63:0])
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="CVTSI2SD" form="xmm, r64" xed="CVTSI2SD_XMMsd_GPR64q"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_cvtepi32_ps">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Convert</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128i" varname="a" etype="SI32"/>
+ <description>Convert packed signed 32-bit integers in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := 32*j
+ dst[i+31:i] := Convert_Int32_To_FP32(a[i+31:i])
+ENDFOR
+ </operation>
+ <instruction name="CVTDQ2PS" form="xmm, xmm" xed="CVTDQ2PS_XMMps_XMMdq"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" name="_mm_cvtpi32_pd">
+ <type>Floating Point</type>
+ <CPUID>SSE2</CPUID>
+ <category>Convert</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m64" varname="a" etype="SI32"/>
+ <description>Convert packed signed 32-bit integers in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*32
+ m := j*64
+ dst[m+63:m] := Convert_Int32_To_FP64(a[i+31:i])
+ENDFOR
+ </operation>
+ <instruction name="CVTPI2PD" form="xmm, mm" xed="CVTPI2PD_XMMpd_MMXq"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_cvtsi32_si128">
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="int" varname="a" etype="UI32"/>
+ <description>Copy 32-bit integer "a" to the lower elements of "dst", and zero the upper elements of "dst".</description>
+ <operation>
+dst[31:0] := a[31:0]
+dst[127:32] := 0
+ </operation>
+ <instruction name="MOVD" form="xmm, r32" xed="MOVD_XMMdq_GPR32"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_cvtsi64_si128">
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__int64" varname="a" etype="UI64"/>
+ <description>Copy 64-bit integer "a" to the lower element of "dst", and zero the upper element.</description>
+ <operation>
+dst[63:0] := a[63:0]
+dst[127:64] := 0
+ </operation>
+ <instruction name="MOVQ" form="xmm, r64" xed="MOVQ_XMMdq_GPR64"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_cvtsi64x_si128">
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__int64" varname="a" etype="UI64"/>
+ <description>Copy 64-bit integer "a" to the lower element of "dst", and zero the upper element.</description>
+ <operation>
+dst[63:0] := a[63:0]
+dst[127:64] := 0
+ </operation>
+ <instruction name="MOVQ" form="xmm, r64" xed="MOVQ_XMMdq_GPR64"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_cvtsi128_si32">
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Convert</category>
+ <return type="int" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <description>Copy the lower 32-bit integer in "a" to "dst".</description>
+ <operation>
+dst[31:0] := a[31:0]
+ </operation>
+ <instruction name="MOVD" form="r32, xmm" xed="MOVD_GPR32_XMMd"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_cvtsi128_si64">
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Convert</category>
+ <return type="__int64" varname="dst" etype="UI64"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <description>Copy the lower 64-bit integer in "a" to "dst".</description>
+ <operation>
+dst[63:0] := a[63:0]
+ </operation>
+ <instruction name="MOVQ" form="r64, xmm" xed="MOVQ_GPR64_XMMq"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_cvtsi128_si64x">
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Convert</category>
+ <return type="__int64" varname="dst" etype="UI64"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <description>Copy the lower 64-bit integer in "a" to "dst".</description>
+ <operation>
+dst[63:0] := a[63:0]
+ </operation>
+ <instruction name="MOVQ" form="r64, xmm" xed="MOVQ_GPR64_XMMq"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" sequence="TRUE" name="_mm_set_epi64">
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Set</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m64" varname="e1" etype="UI64"/>
+ <parameter type="__m64" varname="e0" etype="UI64"/>
+ <description>Set packed 64-bit integers in "dst" with the supplied values.</description>
+ <operation>
+dst[63:0] := e0
+dst[127:64] := e1
+ </operation>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" sequence="TRUE" name="_mm_set_epi64x">
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Set</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__int64" varname="e1" etype="UI64"/>
+ <parameter type="__int64" varname="e0" etype="UI64"/>
+ <description>Set packed 64-bit integers in "dst" with the supplied values.</description>
+ <operation>
+dst[63:0] := e0
+dst[127:64] := e1
+ </operation>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" sequence="TRUE" name="_mm_set_epi32">
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Set</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="int" varname="e3" etype="UI32"/>
+ <parameter type="int" varname="e2" etype="UI32"/>
+ <parameter type="int" varname="e1" etype="UI32"/>
+ <parameter type="int" varname="e0" etype="UI32"/>
+ <description>Set packed 32-bit integers in "dst" with the supplied values.</description>
+ <operation>
+dst[31:0] := e0
+dst[63:32] := e1
+dst[95:64] := e2
+dst[127:96] := e3
+ </operation>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" sequence="TRUE" name="_mm_set_epi16">
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Set</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="short" varname="e7" etype="UI16"/>
+ <parameter type="short" varname="e6" etype="UI16"/>
+ <parameter type="short" varname="e5" etype="UI16"/>
+ <parameter type="short" varname="e4" etype="UI16"/>
+ <parameter type="short" varname="e3" etype="UI16"/>
+ <parameter type="short" varname="e2" etype="UI16"/>
+ <parameter type="short" varname="e1" etype="UI16"/>
+ <parameter type="short" varname="e0" etype="UI16"/>
+ <description>Set packed 16-bit integers in "dst" with the supplied values.</description>
+ <operation>
+dst[15:0] := e0
+dst[31:16] := e1
+dst[47:32] := e2
+dst[63:48] := e3
+dst[79:64] := e4
+dst[95:80] := e5
+dst[111:96] := e6
+dst[127:112] := e7
+ </operation>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" sequence="TRUE" name="_mm_set_epi8">
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Set</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="char" varname="e15" etype="UI8"/>
+ <parameter type="char" varname="e14" etype="UI8"/>
+ <parameter type="char" varname="e13" etype="UI8"/>
+ <parameter type="char" varname="e12" etype="UI8"/>
+ <parameter type="char" varname="e11" etype="UI8"/>
+ <parameter type="char" varname="e10" etype="UI8"/>
+ <parameter type="char" varname="e9" etype="UI8"/>
+ <parameter type="char" varname="e8" etype="UI8"/>
+ <parameter type="char" varname="e7" etype="UI8"/>
+ <parameter type="char" varname="e6" etype="UI8"/>
+ <parameter type="char" varname="e5" etype="UI8"/>
+ <parameter type="char" varname="e4" etype="UI8"/>
+ <parameter type="char" varname="e3" etype="UI8"/>
+ <parameter type="char" varname="e2" etype="UI8"/>
+ <parameter type="char" varname="e1" etype="UI8"/>
+ <parameter type="char" varname="e0" etype="UI8"/>
+ <description>Set packed 8-bit integers in "dst" with the supplied values.</description>
+ <operation>
+dst[7:0] := e0
+dst[15:8] := e1
+dst[23:16] := e2
+dst[31:24] := e3
+dst[39:32] := e4
+dst[47:40] := e5
+dst[55:48] := e6
+dst[63:56] := e7
+dst[71:64] := e8
+dst[79:72] := e9
+dst[87:80] := e10
+dst[95:88] := e11
+dst[103:96] := e12
+dst[111:104] := e13
+dst[119:112] := e14
+dst[127:120] := e15
+ </operation>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" sequence="TRUE" name="_mm_set1_epi64">
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Set</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m64" varname="a" etype="UI64"/>
+ <description>Broadcast 64-bit integer "a" to all elements of "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ dst[i+63:i] := a[63:0]
+ENDFOR
+ </operation>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" sequence="TRUE" name="_mm_set1_epi64x">
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Set</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__int64" varname="a" etype="UI64"/>
+ <description>Broadcast 64-bit integer "a" to all elements of "dst". This intrinsic may generate "vpbroadcastq".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ dst[i+63:i] := a[63:0]
+ENDFOR
+ </operation>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" sequence="TRUE" name="_mm_set1_epi32">
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Set</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="int" varname="a" etype="UI32"/>
+ <description>Broadcast 32-bit integer "a" to all elements of "dst". This intrinsic may generate "vpbroadcastd".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ dst[i+31:i] := a[31:0]
+ENDFOR
+ </operation>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" sequence="TRUE" name="_mm_set1_epi16">
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Set</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="short" varname="a" etype="UI16"/>
+ <description>Broadcast 16-bit integer "a" to all elements of "dst". This intrinsic may generate "vpbroadcastw".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ dst[i+15:i] := a[15:0]
+ENDFOR
+ </operation>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" sequence="TRUE" name="_mm_set1_epi8">
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Set</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="char" varname="a" etype="UI8"/>
+ <description>Broadcast 8-bit integer "a" to all elements of "dst". This intrinsic may generate "vpbroadcastb".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*8
+ dst[i+7:i] := a[7:0]
+ENDFOR
+ </operation>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" sequence="TRUE" name="_mm_setr_epi64">
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Set</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m64" varname="e1" etype="UI64"/>
+ <parameter type="__m64" varname="e0" etype="UI64"/>
+ <description>Set packed 64-bit integers in "dst" with the supplied values in reverse order.</description>
+ <operation>
+dst[63:0] := e1
+dst[127:64] := e0
+ </operation>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" sequence="TRUE" name="_mm_setr_epi32">
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Set</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="int" varname="e3" etype="UI32"/>
+ <parameter type="int" varname="e2" etype="UI32"/>
+ <parameter type="int" varname="e1" etype="UI32"/>
+ <parameter type="int" varname="e0" etype="UI32"/>
+ <description>Set packed 32-bit integers in "dst" with the supplied values in reverse order.</description>
+ <operation>
+dst[31:0] := e3
+dst[63:32] := e2
+dst[95:64] := e1
+dst[127:96] := e0
+ </operation>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" sequence="TRUE" name="_mm_setr_epi16">
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Set</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="short" varname="e7" etype="UI16"/>
+ <parameter type="short" varname="e6" etype="UI16"/>
+ <parameter type="short" varname="e5" etype="UI16"/>
+ <parameter type="short" varname="e4" etype="UI16"/>
+ <parameter type="short" varname="e3" etype="UI16"/>
+ <parameter type="short" varname="e2" etype="UI16"/>
+ <parameter type="short" varname="e1" etype="UI16"/>
+ <parameter type="short" varname="e0" etype="UI16"/>
+ <description>Set packed 16-bit integers in "dst" with the supplied values in reverse order.</description>
+ <operation>
+dst[15:0] := e7
+dst[31:16] := e6
+dst[47:32] := e5
+dst[63:48] := e4
+dst[79:64] := e3
+dst[95:80] := e2
+dst[111:96] := e1
+dst[127:112] := e0
+ </operation>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" sequence="TRUE" name="_mm_setr_epi8">
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Set</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="char" varname="e15" etype="UI8"/>
+ <parameter type="char" varname="e14" etype="UI8"/>
+ <parameter type="char" varname="e13" etype="UI8"/>
+ <parameter type="char" varname="e12" etype="UI8"/>
+ <parameter type="char" varname="e11" etype="UI8"/>
+ <parameter type="char" varname="e10" etype="UI8"/>
+ <parameter type="char" varname="e9" etype="UI8"/>
+ <parameter type="char" varname="e8" etype="UI8"/>
+ <parameter type="char" varname="e7" etype="UI8"/>
+ <parameter type="char" varname="e6" etype="UI8"/>
+ <parameter type="char" varname="e5" etype="UI8"/>
+ <parameter type="char" varname="e4" etype="UI8"/>
+ <parameter type="char" varname="e3" etype="UI8"/>
+ <parameter type="char" varname="e2" etype="UI8"/>
+ <parameter type="char" varname="e1" etype="UI8"/>
+ <parameter type="char" varname="e0" etype="UI8"/>
+ <description>Set packed 8-bit integers in "dst" with the supplied values in reverse order.</description>
+ <operation>
+dst[7:0] := e15
+dst[15:8] := e14
+dst[23:16] := e13
+dst[31:24] := e12
+dst[39:32] := e11
+dst[47:40] := e10
+dst[55:48] := e9
+dst[63:56] := e8
+dst[71:64] := e7
+dst[79:72] := e6
+dst[87:80] := e5
+dst[95:88] := e4
+dst[103:96] := e3
+dst[111:104] := e2
+dst[119:112] := e1
+dst[127:120] := e0
+ </operation>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" name="_mm_setzero_si128">
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Set</category>
+ <return type="__m128i" varname="dst" etype="M128"/>
+ <description>Return vector of type __m128i with all elements set to zero.</description>
+ <operation>
+dst[MAX:0] := 0
+ </operation>
+ <instruction name="PXOR" form="xmm, xmm" xed="PXOR_XMMdq_XMMdq"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" name="_mm_loadl_epi64">
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Load</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m128i const*" varname="mem_addr" etype="UI64" memwidth="64"/>
+ <description>Load 64-bit integer from memory into the first element of "dst".</description>
+ <operation>
+dst[63:0] := MEM[mem_addr+63:mem_addr]
+dst[MAX:64] := 0
+ </operation>
+ <instruction name="MOVQ" form="xmm, m64" xed="MOVQ_XMMdq_MEMq_0F7E"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_load_si128">
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Load</category>
+ <return type="__m128i" varname="dst" etype="M128"/>
+ <parameter type="__m128i const*" varname="mem_addr" etype="M128" memwidth="128"/>
+ <description>Load 128-bits of integer data from memory into "dst".
+ "mem_addr" must be aligned on a 16-byte boundary or a general-protection exception may be generated.</description>
+ <operation>
+dst[127:0] := MEM[mem_addr+127:mem_addr]
+ </operation>
+ <instruction name="MOVDQA" form="xmm, m128" xed="MOVDQA_XMMdq_MEMdq"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_loadu_si128">
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Load</category>
+ <return type="__m128i" varname="dst" etype="M128"/>
+ <parameter type="__m128i const*" varname="mem_addr" etype="M128" memwidth="128"/>
+ <description>Load 128-bits of integer data from memory into "dst".
+ "mem_addr" does not need to be aligned on any particular boundary.</description>
+ <operation>
+dst[127:0] := MEM[mem_addr+127:mem_addr]
+ </operation>
+ <instruction name="MOVDQU" form="xmm, m128" xed="MOVDQU_XMMdq_MEMdq"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" name="_mm_maskmoveu_si128">
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
+ <parameter type="__m128i" varname="mask" etype="UI8"/>
+ <parameter type="char*" varname="mem_addr" etype="UI8" memwidth="128"/>
+ <description>Conditionally store 8-bit integer elements from "a" into memory using "mask" (elements are not stored when the highest bit is not set in the corresponding element) and a non-temporal memory hint. "mem_addr" does not need to be aligned on any particular boundary.</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*8
+ IF mask[i+7]
+ MEM[mem_addr+i+7:mem_addr+i] := a[i+7:i]
+ FI
+ENDFOR
+ </operation>
+ <instruction name="MASKMOVDQU" form="xmm, xmm" xed="MASKMOVDQU_XMMdq_XMMdq"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_store_si128">
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="__m128i*" varname="mem_addr" etype="M128" memwidth="128"/>
+ <parameter type="__m128i" varname="a" etype="M128"/>
+ <description>Store 128-bits of integer data from "a" into memory.
+ "mem_addr" must be aligned on a 16-byte boundary or a general-protection exception may be generated.</description>
+ <operation>
+MEM[mem_addr+127:mem_addr] := a[127:0]
+ </operation>
+ <instruction name="MOVDQA" form="m128, xmm" xed="MOVDQA_MEMdq_XMMdq"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_storeu_si128">
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="__m128i*" varname="mem_addr" etype="M128" memwidth="128"/>
+ <parameter type="__m128i" varname="a" etype="M128"/>
+ <description>Store 128-bits of integer data from "a" into memory.
+ "mem_addr" does not need to be aligned on any particular boundary.</description>
+ <operation>
+MEM[mem_addr+127:mem_addr] := a[127:0]
+ </operation>
+ <instruction name="MOVDQU" form="m128, xmm" xed="MOVDQU_MEMdq_XMMdq"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" name="_mm_storel_epi64">
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="__m128i*" varname="mem_addr" etype="UI64" memwidth="64"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <description>Store 64-bit integer from the first element of "a" into memory.</description>
+ <operation>
+MEM[mem_addr+63:mem_addr] := a[63:0]
+ </operation>
+ <instruction name="MOVQ" form="m64, xmm" xed="MOVQ_MEMq_XMMq_0F7E"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_stream_si128">
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="__m128i*" varname="mem_addr" etype="M128" memwidth="128"/>
+ <parameter type="__m128i" varname="a" etype="M128"/>
+ <description>Store 128-bits of integer data from "a" into memory using a non-temporal memory hint.
+ "mem_addr" must be aligned on a 16-byte boundary or a general-protection exception may be generated.</description>
+ <operation>
+MEM[mem_addr+127:mem_addr] := a[127:0]
+ </operation>
+ <instruction name="MOVNTDQ" form="m128, xmm" xed="MOVNTDQ_MEMdq_XMMdq"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" name="_mm_stream_si32">
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="int*" varname="mem_addr" etype="UI32" memwidth="32"/>
+ <parameter type="int" varname="a" etype="UI32"/>
+ <description>Store 32-bit integer "a" into memory using a non-temporal hint to minimize cache pollution. If the cache line containing address "mem_addr" is already in the cache, the cache will be updated.</description>
+ <operation>
+MEM[mem_addr+31:mem_addr] := a[31:0]
+ </operation>
+ <instruction name="MOVNTI" form="m32, r32" xed="MOVNTI_MEMd_GPR32"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" name="_mm_stream_si64">
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="__int64*" varname="mem_addr" etype="UI64" memwidth="64"/>
+ <parameter type="__int64" varname="a" etype="UI64"/>
+ <description>Store 64-bit integer "a" into memory using a non-temporal hint to minimize cache pollution. If the cache line containing address "mem_addr" is already in the cache, the cache will be updated.</description>
+ <operation>
+MEM[mem_addr+63:mem_addr] := a[63:0]
+ </operation>
+ <instruction name="MOVNTI" form="m64, r64" xed="MOVNTI_MEMq_GPR64"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" name="_mm_movepi64_pi64">
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m64" varname="dst" etype="FP32"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <description>Copy the lower 64-bit integer in "a" to "dst".</description>
+ <operation>
+dst[63:0] := a[63:0]
+ </operation>
+ <instruction name="MOVDQ2Q" form="mm, xmm" xed="MOVDQ2Q_MMXq_XMMq"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" name="_mm_movpi64_epi64">
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Move</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m64" varname="a" etype="UI64"/>
+ <description>Copy the 64-bit integer "a" to the lower element of "dst", and zero the upper element.</description>
+ <operation>
+dst[63:0] := a[63:0]
+dst[127:64] := 0
+ </operation>
+ <instruction name="MOVQ2DQ" form="xmm, mm" xed="MOVQ2DQ_XMMdq_MMXq"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_move_epi64">
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Move</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <description>Copy the lower 64-bit integer in "a" to the lower element of "dst", and zero the upper element.</description>
+ <operation>
+dst[63:0] := a[63:0]
+dst[127:64] := 0
+ </operation>
+ <instruction name="MOVQ" form="xmm, xmm" xed="MOVQ_XMMdq_XMMq_0F7E"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_packs_epi16">
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128i" varname="dst" etype="SI8"/>
+ <parameter type="__m128i" varname="a" etype="SI16"/>
+ <parameter type="__m128i" varname="b" etype="SI16"/>
+ <description>Convert packed signed 16-bit integers from "a" and "b" to packed 8-bit integers using signed saturation, and store the results in "dst".</description>
+ <operation>
+dst[7:0] := Saturate8(a[15:0])
+dst[15:8] := Saturate8(a[31:16])
+dst[23:16] := Saturate8(a[47:32])
+dst[31:24] := Saturate8(a[63:48])
+dst[39:32] := Saturate8(a[79:64])
+dst[47:40] := Saturate8(a[95:80])
+dst[55:48] := Saturate8(a[111:96])
+dst[63:56] := Saturate8(a[127:112])
+dst[71:64] := Saturate8(b[15:0])
+dst[79:72] := Saturate8(b[31:16])
+dst[87:80] := Saturate8(b[47:32])
+dst[95:88] := Saturate8(b[63:48])
+dst[103:96] := Saturate8(b[79:64])
+dst[111:104] := Saturate8(b[95:80])
+dst[119:112] := Saturate8(b[111:96])
+dst[127:120] := Saturate8(b[127:112])
+ </operation>
+ <instruction name="PACKSSWB" form="xmm, xmm" xed="PACKSSWB_XMMdq_XMMdq"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_packs_epi32">
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128i" varname="dst" etype="SI16"/>
+ <parameter type="__m128i" varname="a" etype="SI32"/>
+ <parameter type="__m128i" varname="b" etype="SI32"/>
+ <description>Convert packed signed 32-bit integers from "a" and "b" to packed 16-bit integers using signed saturation, and store the results in "dst".</description>
+ <operation>
+dst[15:0] := Saturate16(a[31:0])
+dst[31:16] := Saturate16(a[63:32])
+dst[47:32] := Saturate16(a[95:64])
+dst[63:48] := Saturate16(a[127:96])
+dst[79:64] := Saturate16(b[31:0])
+dst[95:80] := Saturate16(b[63:32])
+dst[111:96] := Saturate16(b[95:64])
+dst[127:112] := Saturate16(b[127:96])
+ </operation>
+ <instruction name="PACKSSDW" form="xmm, xmm" xed="PACKSSDW_XMMdq_XMMdq"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_packus_epi16">
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__m128i" varname="a" etype="SI16"/>
+ <parameter type="__m128i" varname="b" etype="SI16"/>
+ <description>Convert packed signed 16-bit integers from "a" and "b" to packed 8-bit integers using unsigned saturation, and store the results in "dst".</description>
+ <operation>
+dst[7:0] := SaturateU8(a[15:0])
+dst[15:8] := SaturateU8(a[31:16])
+dst[23:16] := SaturateU8(a[47:32])
+dst[31:24] := SaturateU8(a[63:48])
+dst[39:32] := SaturateU8(a[79:64])
+dst[47:40] := SaturateU8(a[95:80])
+dst[55:48] := SaturateU8(a[111:96])
+dst[63:56] := SaturateU8(a[127:112])
+dst[71:64] := SaturateU8(b[15:0])
+dst[79:72] := SaturateU8(b[31:16])
+dst[87:80] := SaturateU8(b[47:32])
+dst[95:88] := SaturateU8(b[63:48])
+dst[103:96] := SaturateU8(b[79:64])
+dst[111:104] := SaturateU8(b[95:80])
+dst[119:112] := SaturateU8(b[111:96])
+dst[127:120] := SaturateU8(b[127:112])
+ </operation>
+ <instruction name="PACKUSWB" form="xmm, xmm" xed="PACKUSWB_XMMdq_XMMdq"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_extract_epi16">
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Swizzle</category>
+ <return type="int" varname="dst" etype="UI16"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="3"/>
+ <description>Extract a 16-bit integer from "a", selected with "imm8", and store the result in the lower element of "dst".</description>
+ <operation>
+dst[15:0] := (a[127:0] &gt;&gt; (imm8[2:0] * 16))[15:0]
+dst[31:16] := 0
+ </operation>
+ <instruction name="PEXTRW" form="r32, xmm, imm8" xed="PEXTRW_GPR32_XMMdq_IMMb"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_insert_epi16">
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Swizzle</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <parameter type="int" varname="i" etype="UI16"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="3"/>
+ <description>Copy "a" to "dst", and insert the 16-bit integer "i" into "dst" at the location specified by "imm8".</description>
+ <operation>
+dst[127:0] := a[127:0]
+sel := imm8[2:0]*16
+dst[sel+15:sel] := i[15:0]
+ </operation>
+ <instruction name="PINSRW" form="xmm, r32, imm8" xed="PINSRW_XMMdq_GPR32_IMMb"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_movemask_epi8">
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Miscellaneous</category>
+ <return type="int" varname="dst" etype="MASK"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
+ <description>Create mask from the most significant bit of each 8-bit element in "a", and store the result in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*8
+ dst[j] := a[i+7]
+ENDFOR
+dst[MAX:16] := 0
+ </operation>
+ <instruction name="PMOVMSKB" form="r32, xmm" xed="PMOVMSKB_GPR32_XMMdq"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_shuffle_epi32">
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Swizzle</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shuffle 32-bit integers in "a" using the control in "imm8", and store the results in "dst".</description>
+ <operation>
+DEFINE SELECT4(src, control) {
+ CASE(control[1:0]) OF
+ 0: tmp[31:0] := src[31:0]
+ 1: tmp[31:0] := src[63:32]
+ 2: tmp[31:0] := src[95:64]
+ 3: tmp[31:0] := src[127:96]
+ ESAC
+ RETURN tmp[31:0]
+}
+dst[31:0] := SELECT4(a[127:0], imm8[1:0])
+dst[63:32] := SELECT4(a[127:0], imm8[3:2])
+dst[95:64] := SELECT4(a[127:0], imm8[5:4])
+dst[127:96] := SELECT4(a[127:0], imm8[7:6])
+ </operation>
+ <instruction name="PSHUFD" form="xmm, xmm, imm8" xed="PSHUFD_XMMdq_XMMdq_IMMb"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_shufflehi_epi16">
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Swizzle</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shuffle 16-bit integers in the high 64 bits of "a" using the control in "imm8". Store the results in the high 64 bits of "dst", with the low 64 bits being copied from "a" to "dst".</description>
+ <operation>
+dst[63:0] := a[63:0]
+dst[79:64] := (a &gt;&gt; (imm8[1:0] * 16))[79:64]
+dst[95:80] := (a &gt;&gt; (imm8[3:2] * 16))[79:64]
+dst[111:96] := (a &gt;&gt; (imm8[5:4] * 16))[79:64]
+dst[127:112] := (a &gt;&gt; (imm8[7:6] * 16))[79:64]
+ </operation>
+ <instruction name="PSHUFHW" form="xmm, xmm, imm8" xed="PSHUFHW_XMMdq_XMMdq_IMMb"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_shufflelo_epi16">
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Swizzle</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Shuffle 16-bit integers in the low 64 bits of "a" using the control in "imm8". Store the results in the low 64 bits of "dst", with the high 64 bits being copied from "a" to "dst".</description>
+ <operation>
+dst[15:0] := (a &gt;&gt; (imm8[1:0] * 16))[15:0]
+dst[31:16] := (a &gt;&gt; (imm8[3:2] * 16))[15:0]
+dst[47:32] := (a &gt;&gt; (imm8[5:4] * 16))[15:0]
+dst[63:48] := (a &gt;&gt; (imm8[7:6] * 16))[15:0]
+dst[127:64] := a[127:64]
+ </operation>
+ <instruction name="PSHUFLW" form="xmm, xmm, imm8" xed="PSHUFLW_XMMdq_XMMdq_IMMb"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_unpackhi_epi8">
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Swizzle</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
+ <parameter type="__m128i" varname="b" etype="UI8"/>
+ <description>Unpack and interleave 8-bit integers from the high half of "a" and "b", and store the results in "dst".</description>
+ <operation>
+DEFINE INTERLEAVE_HIGH_BYTES(src1[127:0], src2[127:0]) {
+ dst[7:0] := src1[71:64]
+ dst[15:8] := src2[71:64]
+ dst[23:16] := src1[79:72]
+ dst[31:24] := src2[79:72]
+ dst[39:32] := src1[87:80]
+ dst[47:40] := src2[87:80]
+ dst[55:48] := src1[95:88]
+ dst[63:56] := src2[95:88]
+ dst[71:64] := src1[103:96]
+ dst[79:72] := src2[103:96]
+ dst[87:80] := src1[111:104]
+ dst[95:88] := src2[111:104]
+ dst[103:96] := src1[119:112]
+ dst[111:104] := src2[119:112]
+ dst[119:112] := src1[127:120]
+ dst[127:120] := src2[127:120]
+ RETURN dst[127:0]
+}
+dst[127:0] := INTERLEAVE_HIGH_BYTES(a[127:0], b[127:0])
+ </operation>
+ <instruction name="PUNPCKHBW" form="xmm, xmm" xed="PUNPCKHBW_XMMdq_XMMq"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_unpackhi_epi16">
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Swizzle</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <parameter type="__m128i" varname="b" etype="UI16"/>
+ <description>Unpack and interleave 16-bit integers from the high half of "a" and "b", and store the results in "dst".</description>
+ <operation>
+DEFINE INTERLEAVE_HIGH_WORDS(src1[127:0], src2[127:0]) {
+ dst[15:0] := src1[79:64]
+ dst[31:16] := src2[79:64]
+ dst[47:32] := src1[95:80]
+ dst[63:48] := src2[95:80]
+ dst[79:64] := src1[111:96]
+ dst[95:80] := src2[111:96]
+ dst[111:96] := src1[127:112]
+ dst[127:112] := src2[127:112]
+ RETURN dst[127:0]
+}
+dst[127:0] := INTERLEAVE_HIGH_WORDS(a[127:0], b[127:0])
+ </operation>
+ <instruction name="PUNPCKHWD" form="xmm, xmm" xed="PUNPCKHWD_XMMdq_XMMq"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_unpackhi_epi32">
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Swizzle</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="b" etype="UI32"/>
+ <description>Unpack and interleave 32-bit integers from the high half of "a" and "b", and store the results in "dst".</description>
+ <operation>
+DEFINE INTERLEAVE_HIGH_DWORDS(src1[127:0], src2[127:0]) {
+ dst[31:0] := src1[95:64]
+ dst[63:32] := src2[95:64]
+ dst[95:64] := src1[127:96]
+ dst[127:96] := src2[127:96]
+ RETURN dst[127:0]
+}
+dst[127:0] := INTERLEAVE_HIGH_DWORDS(a[127:0], b[127:0])
+ </operation>
+ <instruction name="PUNPCKHDQ" form="xmm, xmm" xed="PUNPCKHDQ_XMMdq_XMMq"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_unpackhi_epi64">
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Swizzle</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <parameter type="__m128i" varname="b" etype="UI64"/>
+ <description>Unpack and interleave 64-bit integers from the high half of "a" and "b", and store the results in "dst".</description>
+ <operation>
+DEFINE INTERLEAVE_HIGH_QWORDS(src1[127:0], src2[127:0]) {
+ dst[63:0] := src1[127:64]
+ dst[127:64] := src2[127:64]
+ RETURN dst[127:0]
+}
+dst[127:0] := INTERLEAVE_HIGH_QWORDS(a[127:0], b[127:0])
+ </operation>
+ <instruction name="PUNPCKHQDQ" form="xmm, xmm" xed="PUNPCKHQDQ_XMMdq_XMMq"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_unpacklo_epi8">
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Swizzle</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
+ <parameter type="__m128i" varname="b" etype="UI8"/>
+ <description>Unpack and interleave 8-bit integers from the low half of "a" and "b", and store the results in "dst".</description>
+ <operation>
+DEFINE INTERLEAVE_BYTES(src1[127:0], src2[127:0]) {
+ dst[7:0] := src1[7:0]
+ dst[15:8] := src2[7:0]
+ dst[23:16] := src1[15:8]
+ dst[31:24] := src2[15:8]
+ dst[39:32] := src1[23:16]
+ dst[47:40] := src2[23:16]
+ dst[55:48] := src1[31:24]
+ dst[63:56] := src2[31:24]
+ dst[71:64] := src1[39:32]
+ dst[79:72] := src2[39:32]
+ dst[87:80] := src1[47:40]
+ dst[95:88] := src2[47:40]
+ dst[103:96] := src1[55:48]
+ dst[111:104] := src2[55:48]
+ dst[119:112] := src1[63:56]
+ dst[127:120] := src2[63:56]
+ RETURN dst[127:0]
+}
+dst[127:0] := INTERLEAVE_BYTES(a[127:0], b[127:0])
+ </operation>
+ <instruction name="PUNPCKLBW" form="xmm, xmm" xed="PUNPCKLBW_XMMdq_XMMq"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_unpacklo_epi16">
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Swizzle</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <parameter type="__m128i" varname="b" etype="UI16"/>
+ <description>Unpack and interleave 16-bit integers from the low half of "a" and "b", and store the results in "dst".</description>
+ <operation>
+DEFINE INTERLEAVE_WORDS(src1[127:0], src2[127:0]) {
+ dst[15:0] := src1[15:0]
+ dst[31:16] := src2[15:0]
+ dst[47:32] := src1[31:16]
+ dst[63:48] := src2[31:16]
+ dst[79:64] := src1[47:32]
+ dst[95:80] := src2[47:32]
+ dst[111:96] := src1[63:48]
+ dst[127:112] := src2[63:48]
+ RETURN dst[127:0]
+}
+dst[127:0] := INTERLEAVE_WORDS(a[127:0], b[127:0])
+ </operation>
+ <instruction name="PUNPCKLWD" form="xmm, xmm" xed="PUNPCKLWD_XMMdq_XMMq"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_unpacklo_epi32">
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Swizzle</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="b" etype="UI32"/>
+ <description>Unpack and interleave 32-bit integers from the low half of "a" and "b", and store the results in "dst".</description>
+ <operation>
+DEFINE INTERLEAVE_DWORDS(src1[127:0], src2[127:0]) {
+ dst[31:0] := src1[31:0]
+ dst[63:32] := src2[31:0]
+ dst[95:64] := src1[63:32]
+ dst[127:96] := src2[63:32]
+ RETURN dst[127:0]
+}
+dst[127:0] := INTERLEAVE_DWORDS(a[127:0], b[127:0])
+ </operation>
+ <instruction name="PUNPCKLDQ" form="xmm, xmm" xed="PUNPCKLDQ_XMMdq_XMMq"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_unpacklo_epi64">
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Swizzle</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <parameter type="__m128i" varname="b" etype="UI64"/>
+ <description>Unpack and interleave 64-bit integers from the low half of "a" and "b", and store the results in "dst".</description>
+ <operation>
+DEFINE INTERLEAVE_QWORDS(src1[127:0], src2[127:0]) {
+ dst[63:0] := src1[63:0]
+ dst[127:64] := src2[63:0]
+ RETURN dst[127:0]
+}
+dst[127:0] := INTERLEAVE_QWORDS(a[127:0], b[127:0])
+ </operation>
+ <instruction name="PUNPCKLQDQ" form="xmm, xmm" xed="PUNPCKLQDQ_XMMdq_XMMq"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_add_sd">
+ <type>Floating Point</type>
+ <CPUID>SSE2</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Add the lower double-precision (64-bit) floating-point element in "a" and "b", store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst".</description>
+ <operation>
+dst[63:0] := a[63:0] + b[63:0]
+dst[127:64] := a[127:64]
+ </operation>
+ <instruction name="ADDSD" form="xmm, xmm" xed="ADDSD_XMMsd_XMMsd"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_add_pd">
+ <type>Floating Point</type>
+ <CPUID>SSE2</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Add packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ dst[i+63:i] := a[i+63:i] + b[i+63:i]
+ENDFOR
+ </operation>
+ <instruction name="ADDPD" form="xmm, xmm" xed="ADDPD_XMMpd_XMMpd"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_div_sd">
+ <type>Floating Point</type>
+ <CPUID>SSE2</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Divide the lower double-precision (64-bit) floating-point element in "a" by the lower double-precision (64-bit) floating-point element in "b", store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst".</description>
+ <operation>
+dst[63:0] := a[63:0] / b[63:0]
+dst[127:64] := a[127:64]
+ </operation>
+ <instruction name="DIVSD" form="xmm, xmm" xed="DIVSD_XMMsd_XMMsd"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_div_pd">
+ <type>Floating Point</type>
+ <CPUID>SSE2</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Divide packed double-precision (64-bit) floating-point elements in "a" by packed elements in "b", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := 64*j
+ dst[i+63:i] := a[i+63:i] / b[i+63:i]
+ENDFOR
+ </operation>
+ <instruction name="DIVPD" form="xmm, xmm" xed="DIVPD_XMMpd_XMMpd"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_max_sd">
+ <type>Floating Point</type>
+ <CPUID>SSE2</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Compare the lower double-precision (64-bit) floating-point elements in "a" and "b", store the maximum value in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst".</description>
+ <operation>
+dst[63:0] := MAX(a[63:0], b[63:0])
+dst[127:64] := a[127:64]
+ </operation>
+ <instruction name="MAXSD" form="xmm, xmm" xed="MAXSD_XMMsd_XMMsd"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_max_pd">
+ <type>Floating Point</type>
+ <CPUID>SSE2</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Compare packed double-precision (64-bit) floating-point elements in "a" and "b", and store packed maximum values in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ dst[i+63:i] := MAX(a[i+63:i], b[i+63:i])
+ENDFOR
+ </operation>
+ <instruction name="MAXPD" form="xmm, xmm" xed="MAXPD_XMMpd_XMMpd"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_min_sd">
+ <type>Floating Point</type>
+ <CPUID>SSE2</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Compare the lower double-precision (64-bit) floating-point elements in "a" and "b", store the minimum value in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst".</description>
+ <operation>
+dst[63:0] := MIN(a[63:0], b[63:0])
+dst[127:64] := a[127:64]
+ </operation>
+ <instruction name="MINSD" form="xmm, xmm" xed="MINSD_XMMsd_XMMsd"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_min_pd">
+ <type>Floating Point</type>
+ <CPUID>SSE2</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Compare packed double-precision (64-bit) floating-point elements in "a" and "b", and store packed minimum values in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ dst[i+63:i] := MIN(a[i+63:i], b[i+63:i])
+ENDFOR
+ </operation>
+ <instruction name="MINPD" form="xmm, xmm" xed="MINPD_XMMpd_XMMpd"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_mul_sd">
+ <type>Floating Point</type>
+ <CPUID>SSE2</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Multiply the lower double-precision (64-bit) floating-point element in "a" and "b", store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst".</description>
+ <operation>
+dst[63:0] := a[63:0] * b[63:0]
+dst[127:64] := a[127:64]
+ </operation>
+ <instruction name="MULSD" form="xmm, xmm" xed="MULSD_XMMsd_XMMsd"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_mul_pd">
+ <type>Floating Point</type>
+ <CPUID>SSE2</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Multiply packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ dst[i+63:i] := a[i+63:i] * b[i+63:i]
+ENDFOR
+ </operation>
+ <instruction name="MULPD" form="xmm, xmm" xed="MULPD_XMMpd_XMMpd"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_sqrt_sd">
+ <type>Floating Point</type>
+ <CPUID>SSE2</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Compute the square root of the lower double-precision (64-bit) floating-point element in "b", store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst".</description>
+ <operation>
+dst[63:0] := SQRT(b[63:0])
+dst[127:64] := a[127:64]
+ </operation>
+ <instruction name="SQRTSD" form="xmm, xmm" xed="SQRTSD_XMMsd_XMMsd"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_sqrt_pd">
+ <type>Floating Point</type>
+ <CPUID>SSE2</CPUID>
+ <category>Elementary Math Functions</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Compute the square root of packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ dst[i+63:i] := SQRT(a[i+63:i])
+ENDFOR
+ </operation>
+ <instruction name="SQRTPD" form="xmm, xmm" xed="SQRTPD_XMMpd_XMMpd"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_sub_sd">
+ <type>Floating Point</type>
+ <CPUID>SSE2</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Subtract the lower double-precision (64-bit) floating-point element in "b" from the lower double-precision (64-bit) floating-point element in "a", store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst".</description>
+ <operation>
+dst[63:0] := a[63:0] - b[63:0]
+dst[127:64] := a[127:64]
+ </operation>
+ <instruction name="SUBSD" form="xmm, xmm" xed="SUBSD_XMMsd_XMMsd"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_sub_pd">
+ <type>Floating Point</type>
+ <CPUID>SSE2</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Subtract packed double-precision (64-bit) floating-point elements in "b" from packed double-precision (64-bit) floating-point elements in "a", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ dst[i+63:i] := a[i+63:i] - b[i+63:i]
+ENDFOR
+ </operation>
+ <instruction name="SUBPD" form="xmm, xmm" xed="SUBPD_XMMpd_XMMpd"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_and_pd">
+ <type>Floating Point</type>
+ <CPUID>SSE2</CPUID>
+ <category>Logical</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Compute the bitwise AND of packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ dst[i+63:i] := (a[i+63:i] AND b[i+63:i])
+ENDFOR
+ </operation>
+ <instruction name="ANDPD" form="xmm, xmm" xed="ANDPD_XMMxuq_XMMxuq"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_andnot_pd">
+ <type>Floating Point</type>
+ <CPUID>SSE2</CPUID>
+ <category>Logical</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Compute the bitwise NOT of packed double-precision (64-bit) floating-point elements in "a" and then AND with "b", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ dst[i+63:i] := ((NOT a[i+63:i]) AND b[i+63:i])
+ENDFOR
+ </operation>
+ <instruction name="ANDNPD" form="xmm, xmm" xed="ANDNPD_XMMxuq_XMMxuq"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_or_pd">
+ <type>Floating Point</type>
+ <CPUID>SSE2</CPUID>
+ <category>Logical</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Compute the bitwise OR of packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ dst[i+63:i] := a[i+63:i] OR b[i+63:i]
+ENDFOR
+ </operation>
+ <instruction name="ORPD" form="xmm, xmm" xed="ORPD_XMMxuq_XMMxuq"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_xor_pd">
+ <type>Floating Point</type>
+ <CPUID>SSE2</CPUID>
+ <category>Logical</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Compute the bitwise XOR of packed double-precision (64-bit) floating-point elements in "a" and "b", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ dst[i+63:i] := a[i+63:i] XOR b[i+63:i]
+ENDFOR
+ </operation>
+ <instruction name="XORPD" form="xmm, xmm" xed="XORPD_XMMxuq_XMMxuq"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_cmpeq_sd">
+ <type>Floating Point</type>
+ <CPUID>SSE2</CPUID>
+ <category>Compare</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Compare the lower double-precision (64-bit) floating-point elements in "a" and "b" for equality, store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst".</description>
+ <operation>
+dst[63:0] := (a[63:0] == b[63:0]) ? 0xFFFFFFFFFFFFFFFF : 0
+dst[127:64] := a[127:64]
+ </operation>
+ <instruction name="CMPSD" form="xmm, xmm, imm8" xed="CMPSD_XMM_XMMsd_XMMsd_IMMb"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_cmplt_sd">
+ <type>Floating Point</type>
+ <CPUID>SSE2</CPUID>
+ <category>Compare</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Compare the lower double-precision (64-bit) floating-point elements in "a" and "b" for less-than, store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst".</description>
+ <operation>
+dst[63:0] := (a[63:0] &lt; b[63:0]) ? 0xFFFFFFFFFFFFFFFF : 0
+dst[127:64] := a[127:64]
+ </operation>
+ <instruction name="CMPSD" form="xmm, xmm, imm8" xed="CMPSD_XMM_XMMsd_XMMsd_IMMb"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_cmple_sd">
+ <type>Floating Point</type>
+ <CPUID>SSE2</CPUID>
+ <category>Compare</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Compare the lower double-precision (64-bit) floating-point elements in "a" and "b" for less-than-or-equal, store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst".</description>
+ <operation>
+dst[63:0] := (a[63:0] &lt;= b[63:0]) ? 0xFFFFFFFFFFFFFFFF : 0
+dst[127:64] := a[127:64]
+ </operation>
+ <instruction name="CMPSD" form="xmm, xmm, imm8" xed="CMPSD_XMM_XMMsd_XMMsd_IMMb"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_cmpgt_sd">
+ <type>Floating Point</type>
+ <CPUID>SSE2</CPUID>
+ <category>Compare</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Compare the lower double-precision (64-bit) floating-point elements in "a" and "b" for greater-than, store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst".</description>
+ <operation>
+dst[63:0] := (a[63:0] &gt; b[63:0]) ? 0xFFFFFFFFFFFFFFFF : 0
+dst[127:64] := a[127:64]
+ </operation>
+ <instruction name="CMPSD" form="xmm, xmm, imm8" xed="CMPSD_XMM_XMMsd_XMMsd_IMMb"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_cmpge_sd">
+ <type>Floating Point</type>
+ <CPUID>SSE2</CPUID>
+ <category>Compare</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Compare the lower double-precision (64-bit) floating-point elements in "a" and "b" for greater-than-or-equal, store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst".</description>
+ <operation>
+dst[63:0] := (a[63:0] &gt;= b[63:0]) ? 0xFFFFFFFFFFFFFFFF : 0
+dst[127:64] := a[127:64]
+ </operation>
+ <instruction name="CMPSD" form="xmm, xmm, imm8" xed="CMPSD_XMM_XMMsd_XMMsd_IMMb"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_cmpord_sd">
+ <type>Floating Point</type>
+ <CPUID>SSE2</CPUID>
+ <category>Compare</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Compare the lower double-precision (64-bit) floating-point elements in "a" and "b" to see if neither is NaN, store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst".</description>
+ <operation>dst[63:0] := (a[63:0] != NaN AND b[63:0] != NaN) ? 0xFFFFFFFFFFFFFFFF : 0
+dst[127:64] := a[127:64]
+ </operation>
+ <instruction name="CMPSD" form="xmm, xmm, imm8" xed="CMPSD_XMM_XMMsd_XMMsd_IMMb"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_cmpunord_sd">
+ <type>Floating Point</type>
+ <CPUID>SSE2</CPUID>
+ <category>Compare</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Compare the lower double-precision (64-bit) floating-point elements in "a" and "b" to see if either is NaN, store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst".</description>
+ <operation>dst[63:0] := (a[63:0] == NaN OR b[63:0] == NaN) ? 0xFFFFFFFFFFFFFFFF : 0
+dst[127:64] := a[127:64]
+ </operation>
+ <instruction name="CMPSD" form="xmm, xmm, imm8" xed="CMPSD_XMM_XMMsd_XMMsd_IMMb"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_cmpneq_sd">
+ <type>Floating Point</type>
+ <CPUID>SSE2</CPUID>
+ <category>Compare</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Compare the lower double-precision (64-bit) floating-point elements in "a" and "b" for not-equal, store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst".</description>
+ <operation>
+dst[63:0] := (a[63:0] != b[63:0]) ? 0xFFFFFFFFFFFFFFFF : 0
+dst[127:64] := a[127:64]
+ </operation>
+ <instruction name="CMPSD" form="xmm, xmm, imm8" xed="CMPSD_XMM_XMMsd_XMMsd_IMMb"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_cmpnlt_sd">
+ <type>Floating Point</type>
+ <CPUID>SSE2</CPUID>
+ <category>Compare</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Compare the lower double-precision (64-bit) floating-point elements in "a" and "b" for not-less-than, store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst".</description>
+ <operation>
+dst[63:0] := (!(a[63:0] &lt; b[63:0])) ? 0xFFFFFFFFFFFFFFFF : 0
+dst[127:64] := a[127:64]
+ </operation>
+ <instruction name="CMPSD" form="xmm, xmm, imm8" xed="CMPSD_XMM_XMMsd_XMMsd_IMMb"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_cmpnle_sd">
+ <type>Floating Point</type>
+ <CPUID>SSE2</CPUID>
+ <category>Compare</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Compare the lower double-precision (64-bit) floating-point elements in "a" and "b" for not-less-than-or-equal, store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst".</description>
+ <operation>
+dst[63:0] := (!(a[63:0] &lt;= b[63:0])) ? 0xFFFFFFFFFFFFFFFF : 0
+dst[127:64] := a[127:64]
+ </operation>
+ <instruction name="CMPSD" form="xmm, xmm, imm8" xed="CMPSD_XMM_XMMsd_XMMsd_IMMb"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_cmpngt_sd">
+ <type>Floating Point</type>
+ <CPUID>SSE2</CPUID>
+ <category>Compare</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Compare the lower double-precision (64-bit) floating-point elements in "a" and "b" for not-greater-than, store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst".</description>
+ <operation>
+dst[63:0] := (!(a[63:0] &gt; b[63:0])) ? 0xFFFFFFFFFFFFFFFF : 0
+dst[127:64] := a[127:64]
+ </operation>
+ <instruction name="CMPSD" form="xmm, xmm, imm8" xed="CMPSD_XMM_XMMsd_XMMsd_IMMb"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_cmpnge_sd">
+ <type>Floating Point</type>
+ <CPUID>SSE2</CPUID>
+ <category>Compare</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Compare the lower double-precision (64-bit) floating-point elements in "a" and "b" for not-greater-than-or-equal, store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst".</description>
+ <operation>
+dst[63:0] := (!(a[63:0] &gt;= b[63:0])) ? 0xFFFFFFFFFFFFFFFF : 0
+dst[127:64] := a[127:64]
+ </operation>
+ <instruction name="CMPSD" form="xmm, xmm, imm8" xed="CMPSD_XMM_XMMsd_XMMsd_IMMb"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_cmpeq_pd">
+ <type>Floating Point</type>
+ <CPUID>SSE2</CPUID>
+ <category>Compare</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Compare packed double-precision (64-bit) floating-point elements in "a" and "b" for equality, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ dst[i+63:i] := (a[i+63:i] == b[i+63:i]) ? 0xFFFFFFFFFFFFFFFF : 0
+ENDFOR
+ </operation>
+ <instruction name="CMPPD" form="xmm, xmm, imm8" xed="CMPPD_XMMpd_XMMpd_IMMb"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_cmplt_pd">
+ <type>Floating Point</type>
+ <CPUID>SSE2</CPUID>
+ <category>Compare</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Compare packed double-precision (64-bit) floating-point elements in "a" and "b" for less-than, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ dst[i+63:i] := (a[i+63:i] &lt; b[i+63:i]) ? 0xFFFFFFFFFFFFFFFF : 0
+ENDFOR
+ </operation>
+ <instruction name="CMPPD" form="xmm, xmm, imm8" xed="CMPPD_XMMpd_XMMpd_IMMb"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_cmple_pd">
+ <type>Floating Point</type>
+ <CPUID>SSE2</CPUID>
+ <category>Compare</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Compare packed double-precision (64-bit) floating-point elements in "a" and "b" for less-than-or-equal, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ dst[i+63:i] := (a[i+63:i] &lt;= b[i+63:i]) ? 0xFFFFFFFFFFFFFFFF : 0
+ENDFOR
+ </operation>
+ <instruction name="CMPPD" form="xmm, xmm, imm8" xed="CMPPD_XMMpd_XMMpd_IMMb"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_cmpgt_pd">
+ <type>Floating Point</type>
+ <CPUID>SSE2</CPUID>
+ <category>Compare</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Compare packed double-precision (64-bit) floating-point elements in "a" and "b" for greater-than, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ dst[i+63:i] := (a[i+63:i] &gt; b[i+63:i]) ? 0xFFFFFFFFFFFFFFFF : 0
+ENDFOR
+ </operation>
+ <instruction name="CMPPD" form="xmm, xmm, imm8" xed="CMPPD_XMMpd_XMMpd_IMMb"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_cmpge_pd">
+ <type>Floating Point</type>
+ <CPUID>SSE2</CPUID>
+ <category>Compare</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Compare packed double-precision (64-bit) floating-point elements in "a" and "b" for greater-than-or-equal, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ dst[i+63:i] := (a[i+63:i] &gt;= b[i+63:i]) ? 0xFFFFFFFFFFFFFFFF : 0
+ENDFOR
+ </operation>
+ <instruction name="CMPPD" form="xmm, xmm, imm8" xed="CMPPD_XMMpd_XMMpd_IMMb"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_cmpord_pd">
+ <type>Floating Point</type>
+ <CPUID>SSE2</CPUID>
+ <category>Compare</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Compare packed double-precision (64-bit) floating-point elements in "a" and "b" to see if neither is NaN, and store the results in "dst".</description>
+ <operation>FOR j := 0 to 1
+ i := j*64
+ dst[i+63:i] := (a[i+63:i] != NaN AND b[i+63:i] != NaN) ? 0xFFFFFFFFFFFFFFFF : 0
+ENDFOR
+ </operation>
+ <instruction name="CMPPD" form="xmm, xmm, imm8" xed="CMPPD_XMMpd_XMMpd_IMMb"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_cmpunord_pd">
+ <type>Floating Point</type>
+ <CPUID>SSE2</CPUID>
+ <category>Compare</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Compare packed double-precision (64-bit) floating-point elements in "a" and "b" to see if either is NaN, and store the results in "dst".</description>
+ <operation>FOR j := 0 to 1
+ i := j*64
+ dst[i+63:i] := (a[i+63:i] == NaN OR b[i+63:i] == NaN) ? 0xFFFFFFFFFFFFFFFF : 0
+ENDFOR
+ </operation>
+ <instruction name="CMPPD" form="xmm, xmm, imm8" xed="CMPPD_XMMpd_XMMpd_IMMb"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_cmpneq_pd">
+ <type>Floating Point</type>
+ <CPUID>SSE2</CPUID>
+ <category>Compare</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Compare packed double-precision (64-bit) floating-point elements in "a" and "b" for not-equal, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ dst[i+63:i] := (a[i+63:i] != b[i+63:i]) ? 0xFFFFFFFFFFFFFFFF : 0
+ENDFOR
+ </operation>
+ <instruction name="CMPPD" form="xmm, xmm, imm8" xed="CMPPD_XMMpd_XMMpd_IMMb"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_cmpnlt_pd">
+ <type>Floating Point</type>
+ <CPUID>SSE2</CPUID>
+ <category>Compare</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Compare packed double-precision (64-bit) floating-point elements in "a" and "b" for not-less-than, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ dst[i+63:i] := (!(a[i+63:i] &lt; b[i+63:i])) ? 0xFFFFFFFFFFFFFFFF : 0
+ENDFOR
+ </operation>
+ <instruction name="CMPPD" form="xmm, xmm, imm8" xed="CMPPD_XMMpd_XMMpd_IMMb"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_cmpnle_pd">
+ <type>Floating Point</type>
+ <CPUID>SSE2</CPUID>
+ <category>Compare</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Compare packed double-precision (64-bit) floating-point elements in "a" and "b" for not-less-than-or-equal, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ dst[i+63:i] := (!(a[i+63:i] &lt;= b[i+63:i])) ? 0xFFFFFFFFFFFFFFFF : 0
+ENDFOR
+ </operation>
+ <instruction name="CMPPD" form="xmm, xmm, imm8" xed="CMPPD_XMMpd_XMMpd_IMMb"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_cmpngt_pd">
+ <type>Floating Point</type>
+ <CPUID>SSE2</CPUID>
+ <category>Compare</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Compare packed double-precision (64-bit) floating-point elements in "a" and "b" for not-greater-than, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ dst[i+63:i] := (!(a[i+63:i] &gt; b[i+63:i])) ? 0xFFFFFFFFFFFFFFFF : 0
+ENDFOR
+ </operation>
+ <instruction name="CMPPD" form="xmm, xmm, imm8" xed="CMPPD_XMMpd_XMMpd_IMMb"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_cmpnge_pd">
+ <type>Floating Point</type>
+ <CPUID>SSE2</CPUID>
+ <category>Compare</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Compare packed double-precision (64-bit) floating-point elements in "a" and "b" for not-greater-than-or-equal, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ dst[i+63:i] := (!(a[i+63:i] &gt;= b[i+63:i])) ? 0xFFFFFFFFFFFFFFFF : 0
+ENDFOR
+ </operation>
+ <instruction name="CMPPD" form="xmm, xmm, imm8" xed="CMPPD_XMMpd_XMMpd_IMMb"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_comieq_sd">
+ <type>Floating Point</type>
+ <type>Flag</type>
+ <CPUID>SSE2</CPUID>
+ <category>Compare</category>
+ <return type="int" varname="k" etype="UI32"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Compare the lower double-precision (64-bit) floating-point element in "a" and "b" for equality, and return the boolean result (0 or 1).</description>
+ <operation>
+RETURN ( a[63:0] == b[63:0] ) ? 1 : 0
+ </operation>
+ <instruction name="COMISD" form="xmm, xmm" xed="COMISD_XMMsd_XMMsd"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_comilt_sd">
+ <type>Floating Point</type>
+ <type>Flag</type>
+ <CPUID>SSE2</CPUID>
+ <category>Compare</category>
+ <return type="int" varname="k" etype="UI32"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Compare the lower double-precision (64-bit) floating-point element in "a" and "b" for less-than, and return the boolean result (0 or 1).</description>
+ <operation>
+RETURN ( a[63:0] &lt; b[63:0] ) ? 1 : 0
+ </operation>
+ <instruction name="COMISD" form="xmm, xmm" xed="COMISD_XMMsd_XMMsd"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_comile_sd">
+ <type>Floating Point</type>
+ <type>Flag</type>
+ <CPUID>SSE2</CPUID>
+ <category>Compare</category>
+ <return type="int" varname="k" etype="UI32"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Compare the lower double-precision (64-bit) floating-point element in "a" and "b" for less-than-or-equal, and return the boolean result (0 or 1).</description>
+ <operation>
+RETURN ( a[63:0] &lt;= b[63:0] ) ? 1 : 0
+ </operation>
+ <instruction name="COMISD" form="xmm, xmm" xed="COMISD_XMMsd_XMMsd"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_comigt_sd">
+ <type>Floating Point</type>
+ <type>Flag</type>
+ <CPUID>SSE2</CPUID>
+ <category>Compare</category>
+ <return type="int" varname="k" etype="UI32"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Compare the lower double-precision (64-bit) floating-point element in "a" and "b" for greater-than, and return the boolean result (0 or 1).</description>
+ <operation>
+RETURN ( a[63:0] &gt; b[63:0] ) ? 1 : 0
+ </operation>
+ <instruction name="COMISD" form="xmm, xmm" xed="COMISD_XMMsd_XMMsd"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_comige_sd">
+ <type>Floating Point</type>
+ <type>Flag</type>
+ <CPUID>SSE2</CPUID>
+ <category>Compare</category>
+ <return type="int" varname="k" etype="UI32"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Compare the lower double-precision (64-bit) floating-point element in "a" and "b" for greater-than-or-equal, and return the boolean result (0 or 1).</description>
+ <operation>
+RETURN ( a[63:0] &gt;= b[63:0] ) ? 1 : 0
+ </operation>
+ <instruction name="COMISD" form="xmm, xmm" xed="COMISD_XMMsd_XMMsd"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_comineq_sd">
+ <type>Floating Point</type>
+ <type>Flag</type>
+ <CPUID>SSE2</CPUID>
+ <category>Compare</category>
+ <return type="int" varname="k" etype="UI32"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Compare the lower double-precision (64-bit) floating-point element in "a" and "b" for not-equal, and return the boolean result (0 or 1).</description>
+ <operation>
+RETURN ( a[63:0] != b[63:0] ) ? 1 : 0
+ </operation>
+ <instruction name="COMISD" form="xmm, xmm" xed="COMISD_XMMsd_XMMsd"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_ucomieq_sd">
+ <type>Floating Point</type>
+ <type>Flag</type>
+ <CPUID>SSE2</CPUID>
+ <category>Compare</category>
+ <return type="int" varname="k" etype="UI32"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Compare the lower double-precision (64-bit) floating-point element in "a" and "b" for equality, and return the boolean result (0 or 1). This instruction will not signal an exception for QNaNs.</description>
+ <operation>
+RETURN ( a[63:0] == b[63:0] ) ? 1 : 0
+ </operation>
+ <instruction name="UCOMISD" form="xmm, xmm" xed="UCOMISD_XMMsd_XMMsd"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_ucomilt_sd">
+ <type>Floating Point</type>
+ <type>Flag</type>
+ <CPUID>SSE2</CPUID>
+ <category>Compare</category>
+ <return type="int" varname="k" etype="UI32"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Compare the lower double-precision (64-bit) floating-point element in "a" and "b" for less-than, and return the boolean result (0 or 1). This instruction will not signal an exception for QNaNs.</description>
+ <operation>
+RETURN ( a[63:0] &lt; b[63:0] ) ? 1 : 0
+ </operation>
+ <instruction name="UCOMISD" form="xmm, xmm" xed="UCOMISD_XMMsd_XMMsd"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_ucomile_sd">
+ <type>Floating Point</type>
+ <type>Flag</type>
+ <CPUID>SSE2</CPUID>
+ <category>Compare</category>
+ <return type="int" varname="k" etype="UI32"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Compare the lower double-precision (64-bit) floating-point element in "a" and "b" for less-than-or-equal, and return the boolean result (0 or 1). This instruction will not signal an exception for QNaNs.</description>
+ <operation>
+RETURN ( a[63:0] &lt;= b[63:0] ) ? 1 : 0
+ </operation>
+ <instruction name="UCOMISD" form="xmm, xmm" xed="UCOMISD_XMMsd_XMMsd"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_ucomigt_sd">
+ <type>Floating Point</type>
+ <type>Flag</type>
+ <CPUID>SSE2</CPUID>
+ <category>Compare</category>
+ <return type="int" varname="k" etype="UI32"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Compare the lower double-precision (64-bit) floating-point element in "a" and "b" for greater-than, and return the boolean result (0 or 1). This instruction will not signal an exception for QNaNs.</description>
+ <operation>
+RETURN ( a[63:0] &gt; b[63:0] ) ? 1 : 0
+ </operation>
+ <instruction name="UCOMISD" form="xmm, xmm" xed="UCOMISD_XMMsd_XMMsd"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_ucomige_sd">
+ <type>Floating Point</type>
+ <type>Flag</type>
+ <CPUID>SSE2</CPUID>
+ <category>Compare</category>
+ <return type="int" varname="k" etype="UI32"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Compare the lower double-precision (64-bit) floating-point element in "a" and "b" for greater-than-or-equal, and return the boolean result (0 or 1). This instruction will not signal an exception for QNaNs.</description>
+ <operation>
+RETURN ( a[63:0] &gt;= b[63:0] ) ? 1 : 0
+ </operation>
+ <instruction name="UCOMISD" form="xmm, xmm" xed="UCOMISD_XMMsd_XMMsd"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_ucomineq_sd">
+ <type>Floating Point</type>
+ <type>Flag</type>
+ <CPUID>SSE2</CPUID>
+ <category>Compare</category>
+ <return type="int" varname="k" etype="UI32"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Compare the lower double-precision (64-bit) floating-point element in "a" and "b" for not-equal, and return the boolean result (0 or 1). This instruction will not signal an exception for QNaNs.</description>
+ <operation>
+RETURN ( a[63:0] != b[63:0] ) ? 1 : 0
+ </operation>
+ <instruction name="UCOMISD" form="xmm, xmm" xed="UCOMISD_XMMsd_XMMsd"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_cvtpd_ps">
+ <type>Floating Point</type>
+ <CPUID>SSE2</CPUID>
+ <category>Convert</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Convert packed double-precision (64-bit) floating-point elements in "a" to packed single-precision (32-bit) floating-point elements, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := 32*j
+ k := 64*j
+ dst[i+31:i] := Convert_FP64_To_FP32(a[k+63:k])
+ENDFOR
+dst[127:64] := 0
+ </operation>
+ <instruction name="CVTPD2PS" form="xmm, xmm" xed="CVTPD2PS_XMMps_XMMpd"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_cvtps_pd">
+ <type>Floating Point</type>
+ <CPUID>SSE2</CPUID>
+ <category>Convert</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed double-precision (64-bit) floating-point elements, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := 64*j
+ k := 32*j
+ dst[i+63:i] := Convert_FP32_To_FP64(a[k+31:k])
+ENDFOR
+ </operation>
+ <instruction name="CVTPS2PD" form="xmm, xmm" xed="CVTPS2PD_XMMpd_XMMq"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_cvtpd_epi32">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Convert packed double-precision (64-bit) floating-point elements in "a" to packed 32-bit integers, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := 32*j
+ k := 64*j
+ dst[i+31:i] := Convert_FP64_To_Int32(a[k+63:k])
+ENDFOR
+ </operation>
+ <instruction name="CVTPD2DQ" form="xmm, xmm" xed="CVTPD2DQ_XMMdq_XMMpd"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_cvtsd_si32">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Convert</category>
+ <return type="int" varname="dst" etype="UI32"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Convert the lower double-precision (64-bit) floating-point element in "a" to a 32-bit integer, and store the result in "dst".</description>
+ <operation>
+dst[31:0] := Convert_FP64_To_Int32(a[63:0])
+ </operation>
+ <instruction name="CVTSD2SI" form="r32, xmm" xed="CVTSD2SI_GPR32d_XMMsd"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_cvtsd_si64">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Convert</category>
+ <return type="__int64" varname="dst" etype="UI64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Convert the lower double-precision (64-bit) floating-point element in "a" to a 64-bit integer, and store the result in "dst".</description>
+ <operation>
+dst[63:0] := Convert_FP64_To_Int64(a[63:0])
+ </operation>
+ <instruction name="CVTSD2SI" form="r64, xmm" xed="CVTSD2SI_GPR64q_XMMsd"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_cvtsd_si64x">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Convert</category>
+ <return type="__int64" varname="dst" etype="UI64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Convert the lower double-precision (64-bit) floating-point element in "a" to a 64-bit integer, and store the result in "dst".</description>
+ <operation>
+dst[63:0] := Convert_FP64_To_Int64(a[63:0])
+ </operation>
+ <instruction name="CVTSD2SI" form="r64, xmm" xed="CVTSD2SI_GPR64q_XMMsd"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_cvtsd_ss">
+ <type>Floating Point</type>
+ <CPUID>SSE2</CPUID>
+ <category>Convert</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Convert the lower double-precision (64-bit) floating-point element in "b" to a single-precision (32-bit) floating-point element, store the result in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst".</description>
+ <operation>
+dst[31:0] := Convert_FP64_To_FP32(b[63:0])
+dst[127:32] := a[127:32]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="CVTSD2SS" form="xmm, xmm" xed="CVTSD2SS_XMMss_XMMsd"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_cvtsd_f64">
+ <type>Floating Point</type>
+ <CPUID>SSE2</CPUID>
+ <category>Convert</category>
+ <return type="double" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Copy the lower double-precision (64-bit) floating-point element of "a" to "dst".</description>
+ <operation>
+dst[63:0] := a[63:0]
+ </operation>
+ <instruction name="MOVSD" form="m64, xmm" xed="MOVSD_XMM_MEMsd_XMMsd"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_cvtss_sd">
+ <type>Floating Point</type>
+ <CPUID>SSE2</CPUID>
+ <category>Convert</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Convert the lower single-precision (32-bit) floating-point element in "b" to a double-precision (64-bit) floating-point element, store the result in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst".</description>
+ <operation>
+dst[63:0] := Convert_FP32_To_FP64(b[31:0])
+dst[127:64] := a[127:64]
+dst[MAX:128] := 0
+ </operation>
+ <instruction name="CVTSS2SD" form="xmm, xmm" xed="CVTSS2SD_XMMsd_XMMss"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_cvttpd_epi32">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Convert packed double-precision (64-bit) floating-point elements in "a" to packed 32-bit integers with truncation, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := 32*j
+ k := 64*j
+ dst[i+31:i] := Convert_FP64_To_Int32_Truncate(a[k+63:k])
+ENDFOR
+ </operation>
+ <instruction name="CVTTPD2DQ" form="xmm, xmm" xed="CVTTPD2DQ_XMMdq_XMMpd"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_cvttsd_si32">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Convert</category>
+ <return type="int" varname="dst" etype="UI32"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Convert the lower double-precision (64-bit) floating-point element in "a" to a 32-bit integer with truncation, and store the result in "dst".</description>
+ <operation>
+dst[31:0] := Convert_FP64_To_Int32_Truncate(a[63:0])
+ </operation>
+ <instruction name="CVTTSD2SI" form="r32, xmm" xed="CVTTSD2SI_GPR32d_XMMsd"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_cvttsd_si64">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Convert</category>
+ <return type="__int64" varname="dst" etype="UI64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Convert the lower double-precision (64-bit) floating-point element in "a" to a 64-bit integer with truncation, and store the result in "dst".</description>
+ <operation>
+dst[63:0] := Convert_FP64_To_Int64_Truncate(a[63:0])
+ </operation>
+ <instruction name="CVTTSD2SI" form="r64, xmm" xed="CVTTSD2SI_GPR64q_XMMsd"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_cvttsd_si64x">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Convert</category>
+ <return type="__int64" varname="dst" etype="UI64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Convert the lower double-precision (64-bit) floating-point element in "a" to a 64-bit integer with truncation, and store the result in "dst".</description>
+ <operation>
+dst[63:0] := Convert_FP64_To_Int64_Truncate(a[63:0])
+ </operation>
+ <instruction name="CVTTSD2SI" form="r64, xmm" xed="CVTTSD2SI_GPR64q_XMMsd"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_cvtps_epi32">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed 32-bit integers, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := 32*j
+ dst[i+31:i] := Convert_FP32_To_Int32(a[i+31:i])
+ENDFOR
+ </operation>
+ <instruction name="CVTPS2DQ" form="xmm, xmm" xed="CVTPS2DQ_XMMdq_XMMps"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_cvttps_epi32">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Convert packed single-precision (32-bit) floating-point elements in "a" to packed 32-bit integers with truncation, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := 32*j
+ dst[i+31:i] := Convert_FP32_To_Int32_Truncate(a[i+31:i])
+ENDFOR
+ </operation>
+ <instruction name="CVTTPS2DQ" form="xmm, xmm" xed="CVTTPS2DQ_XMMdq_XMMps"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" name="_mm_cvtpd_pi32">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Convert</category>
+ <return type="__m64" varname="dst" etype="FP32"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Convert packed double-precision (64-bit) floating-point elements in "a" to packed 32-bit integers, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := 32*j
+ k := 64*j
+ dst[i+31:i] := Convert_FP64_To_Int32(a[k+63:k])
+ENDFOR
+ </operation>
+ <instruction name="CVTPD2PI" form="mm, xmm" xed="CVTPD2PI_MMXq_XMMpd"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" name="_mm_cvttpd_pi32">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Convert</category>
+ <return type="__m64" varname="dst" etype="FP32"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Convert packed double-precision (64-bit) floating-point elements in "a" to packed 32-bit integers with truncation, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := 32*j
+ k := 64*j
+ dst[i+31:i] := Convert_FP64_To_Int32_Truncate(a[k+63:k])
+ENDFOR
+ </operation>
+ <instruction name="CVTTPD2PI" form="mm, xmm" xed="CVTTPD2PI_MMXq_XMMpd"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" sequence="TRUE" name="_mm_set_sd">
+ <type>Floating Point</type>
+ <CPUID>SSE2</CPUID>
+ <category>Set</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="double" varname="a" etype="FP64"/>
+ <description>Copy double-precision (64-bit) floating-point element "a" to the lower element of "dst", and zero the upper element.</description>
+ <operation>
+dst[63:0] := a[63:0]
+dst[127:64] := 0
+ </operation>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" sequence="TRUE" name="_mm_set1_pd">
+ <type>Floating Point</type>
+ <CPUID>SSE2</CPUID>
+ <category>Set</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="double" varname="a" etype="FP64"/>
+ <description>Broadcast double-precision (64-bit) floating-point value "a" to all elements of "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ dst[i+63:i] := a[63:0]
+ENDFOR
+ </operation>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" sequence="TRUE" name="_mm_set_pd1">
+ <type>Floating Point</type>
+ <CPUID>SSE2</CPUID>
+ <category>Set</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="double" varname="a" etype="FP64"/>
+ <description>Broadcast double-precision (64-bit) floating-point value "a" to all elements of "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ dst[i+63:i] := a[63:0]
+ENDFOR
+ </operation>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" sequence="TRUE" name="_mm_set_pd">
+ <type>Floating Point</type>
+ <CPUID>SSE2</CPUID>
+ <category>Set</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="double" varname="e1" etype="FP64"/>
+ <parameter type="double" varname="e0" etype="FP64"/>
+ <description>Set packed double-precision (64-bit) floating-point elements in "dst" with the supplied values.</description>
+ <operation>
+dst[63:0] := e0
+dst[127:64] := e1
+ </operation>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" sequence="TRUE" name="_mm_setr_pd">
+ <type>Floating Point</type>
+ <CPUID>SSE2</CPUID>
+ <category>Set</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="double" varname="e1" etype="FP64"/>
+ <parameter type="double" varname="e0" etype="FP64"/>
+ <description>Set packed double-precision (64-bit) floating-point elements in "dst" with the supplied values in reverse order.</description>
+ <operation>
+dst[63:0] := e1
+dst[127:64] := e0
+ </operation>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" name="_mm_setzero_pd">
+ <type>Floating Point</type>
+ <CPUID>SSE2</CPUID>
+ <category>Set</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="void"/>
+ <description>Return vector of type __m128d with all elements set to zero.</description>
+ <operation>
+dst[MAX:0] := 0
+ </operation>
+ <instruction name="XORPD" form="xmm, xmm" xed="XORPD_XMMxuq_XMMxuq"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_load_pd">
+ <type>Floating Point</type>
+ <CPUID>SSE2</CPUID>
+ <category>Load</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="double const*" varname="mem_addr" etype="FP64" memwidth="128"/>
+ <description>Load 128-bits (composed of 2 packed double-precision (64-bit) floating-point elements) from memory into "dst".
+ "mem_addr" must be aligned on a 16-byte boundary or a general-protection exception may be generated.</description>
+ <operation>
+dst[127:0] := MEM[mem_addr+127:mem_addr]
+ </operation>
+ <instruction name="MOVAPD" form="xmm, m128" xed="MOVAPD_XMMpd_MEMpd"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" sequence="TRUE" name="_mm_load1_pd">
+ <type>Floating Point</type>
+ <CPUID>SSE2</CPUID>
+ <category>Load</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="double const*" varname="mem_addr" etype="FP64" memwidth="64"/>
+ <description>Load a double-precision (64-bit) floating-point element from memory into both elements of "dst".</description>
+ <operation>
+dst[63:0] := MEM[mem_addr+63:mem_addr]
+dst[127:64] := MEM[mem_addr+63:mem_addr]
+ </operation>
+ <instruction name="MOVAPD" form="xmm, m128" xed="MOVAPD_XMMpd_MEMpd"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" sequence="TRUE" name="_mm_load_pd1">
+ <type>Floating Point</type>
+ <CPUID>SSE2</CPUID>
+ <category>Load</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="double const*" varname="mem_addr" etype="FP64" memwidth="64"/>
+ <description>Load a double-precision (64-bit) floating-point element from memory into both elements of "dst".</description>
+ <operation>
+dst[63:0] := MEM[mem_addr+63:mem_addr]
+dst[127:64] := MEM[mem_addr+63:mem_addr]
+ </operation>
+ <instruction name="MOVAPD" form="xmm, m128" xed="MOVAPD_XMMpd_MEMpd"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" sequence="TRUE" name="_mm_loadr_pd">
+ <type>Floating Point</type>
+ <CPUID>SSE2</CPUID>
+ <category>Load</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="double const*" varname="mem_addr" etype="FP64" memwidth="128"/>
+ <description>Load 2 double-precision (64-bit) floating-point elements from memory into "dst" in reverse order. mem_addr must be aligned on a 16-byte boundary or a general-protection exception may be generated.</description>
+ <operation>
+dst[63:0] := MEM[mem_addr+127:mem_addr+64]
+dst[127:64] := MEM[mem_addr+63:mem_addr]
+ </operation>
+ <instruction name="MOVAPD" form="xmm, m128" xed="MOVAPD_XMMpd_MEMpd"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_loadu_pd">
+ <type>Floating Point</type>
+ <CPUID>SSE2</CPUID>
+ <category>Load</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="double const*" varname="mem_addr" etype="FP64" memwidth="128"/>
+ <description>Load 128-bits (composed of 2 packed double-precision (64-bit) floating-point elements) from memory into "dst".
+ "mem_addr" does not need to be aligned on any particular boundary.</description>
+ <operation>
+dst[127:0] := MEM[mem_addr+127:mem_addr]
+ </operation>
+ <instruction name="MOVUPD" form="xmm, m128" xed="MOVUPD_XMMpd_MEMpd"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_load_sd">
+ <type>Floating Point</type>
+ <CPUID>SSE2</CPUID>
+ <category>Load</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="double const*" varname="mem_addr" etype="FP64" memwidth="64"/>
+ <description>Load a double-precision (64-bit) floating-point element from memory into the lower of "dst", and zero the upper element. "mem_addr" does not need to be aligned on any particular boundary.</description>
+ <operation>
+dst[63:0] := MEM[mem_addr+63:mem_addr]
+dst[127:64] := 0
+ </operation>
+ <instruction name="MOVSD" form="xmm, m64" xed="MOVSD_XMM_XMMdq_MEMsd"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_loadh_pd">
+ <type>Floating Point</type>
+ <CPUID>SSE2</CPUID>
+ <category>Load</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="double const*" varname="mem_addr" etype="FP64" memwidth="64"/>
+ <description>Load a double-precision (64-bit) floating-point element from memory into the upper element of "dst", and copy the lower element from "a" to "dst". "mem_addr" does not need to be aligned on any particular boundary.</description>
+ <operation>
+dst[63:0] := a[63:0]
+dst[127:64] := MEM[mem_addr+63:mem_addr]
+ </operation>
+ <instruction name="MOVHPD" form="xmm, m64" xed="MOVHPD_XMMsd_MEMq"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_loadl_pd">
+ <type>Floating Point</type>
+ <CPUID>SSE2</CPUID>
+ <category>Load</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="double const*" varname="mem_addr" etype="FP64" memwidth="64"/>
+ <description>Load a double-precision (64-bit) floating-point element from memory into the lower element of "dst", and copy the upper element from "a" to "dst". "mem_addr" does not need to be aligned on any particular boundary.</description>
+ <operation>
+dst[63:0] := MEM[mem_addr+63:mem_addr]
+dst[127:64] := a[127:64]
+ </operation>
+ <instruction name="MOVLPD" form="xmm, m64" xed="MOVLPD_XMMsd_MEMq"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_stream_pd">
+ <type>Floating Point</type>
+ <CPUID>SSE2</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="double*" varname="mem_addr" etype="FP64" memwidth="128"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Store 128-bits (composed of 2 packed double-precision (64-bit) floating-point elements) from "a" into memory using a non-temporal memory hint.
+ "mem_addr" must be aligned on a 16-byte boundary or a general-protection exception may be generated.</description>
+ <operation>
+MEM[mem_addr+127:mem_addr] := a[127:0]
+ </operation>
+ <instruction name="MOVNTPD" form="m128, xmm" xed="MOVNTPD_MEMdq_XMMpd"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_store_sd">
+ <type>Floating Point</type>
+ <CPUID>SSE2</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="double*" varname="mem_addr" etype="FP64" memwidth="64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Store the lower double-precision (64-bit) floating-point element from "a" into memory. "mem_addr" does not need to be aligned on any particular boundary.</description>
+ <operation>
+MEM[mem_addr+63:mem_addr] := a[63:0]
+ </operation>
+ <instruction name="MOVSD" form="m64, xmm" xed="MOVSD_XMM_MEMsd_XMMsd"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" sequence="TRUE" name="_mm_store1_pd">
+ <type>Floating Point</type>
+ <CPUID>SSE2</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="double*" varname="mem_addr" etype="FP64" memwidth="64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Store the lower double-precision (64-bit) floating-point element from "a" into 2 contiguous elements in memory. "mem_addr" must be aligned on a 16-byte boundary or a general-protection exception may be generated.</description>
+ <operation>
+MEM[mem_addr+63:mem_addr] := a[63:0]
+MEM[mem_addr+127:mem_addr+64] := a[63:0]
+ </operation>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" sequence="TRUE" name="_mm_store_pd1">
+ <type>Floating Point</type>
+ <CPUID>SSE2</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="double*" varname="mem_addr" etype="FP64" memwidth="64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Store the lower double-precision (64-bit) floating-point element from "a" into 2 contiguous elements in memory. "mem_addr" must be aligned on a 16-byte boundary or a general-protection exception may be generated.</description>
+ <operation>
+MEM[mem_addr+63:mem_addr] := a[63:0]
+MEM[mem_addr+127:mem_addr+64] := a[63:0]
+ </operation>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_store_pd">
+ <type>Floating Point</type>
+ <CPUID>SSE2</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="double*" varname="mem_addr" etype="FP64" memwidth="128"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Store 128-bits (composed of 2 packed double-precision (64-bit) floating-point elements) from "a" into memory.
+ "mem_addr" must be aligned on a 16-byte boundary or a general-protection exception may be generated.</description>
+ <operation>
+MEM[mem_addr+127:mem_addr] := a[127:0]
+ </operation>
+ <instruction name="MOVAPD" form="m128, xmm" xed="MOVAPD_MEMpd_XMMpd"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_storeu_pd">
+ <type>Floating Point</type>
+ <CPUID>SSE2</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="double*" varname="mem_addr" etype="FP64" memwidth="128"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Store 128-bits (composed of 2 packed double-precision (64-bit) floating-point elements) from "a" into memory.
+ "mem_addr" does not need to be aligned on any particular boundary.</description>
+ <operation>
+MEM[mem_addr+127:mem_addr] := a[127:0]
+ </operation>
+ <instruction name="MOVUPD" form="m128, xmm" xed="MOVUPD_MEMpd_XMMpd"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" sequence="TRUE" name="_mm_storer_pd">
+ <type>Floating Point</type>
+ <CPUID>SSE2</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="double*" varname="mem_addr" etype="FP64" memwidth="128"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Store 2 double-precision (64-bit) floating-point elements from "a" into memory in reverse order.
+ "mem_addr" must be aligned on a 16-byte boundary or a general-protection exception may be generated.</description>
+ <operation>
+MEM[mem_addr+63:mem_addr] := a[127:64]
+MEM[mem_addr+127:mem_addr+64] := a[63:0]
+ </operation>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_storeh_pd">
+ <type>Floating Point</type>
+ <CPUID>SSE2</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="double*" varname="mem_addr" etype="FP64" memwidth="64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Store the upper double-precision (64-bit) floating-point element from "a" into memory.</description>
+ <operation>
+MEM[mem_addr+63:mem_addr] := a[127:64]
+ </operation>
+ <instruction name="MOVHPD" form="m64, xmm" xed="MOVHPD_MEMq_XMMsd"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_storel_pd">
+ <type>Floating Point</type>
+ <CPUID>SSE2</CPUID>
+ <category>Store</category>
+ <return type="void"/>
+ <parameter type="double*" varname="mem_addr" etype="FP64" memwidth="64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Store the lower double-precision (64-bit) floating-point element from "a" into memory.</description>
+ <operation>
+MEM[mem_addr+63:mem_addr] := a[63:0]
+ </operation>
+ <instruction name="MOVLPD" form="m64, xmm" xed="MOVLPD_MEMq_XMMsd"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_unpackhi_pd">
+ <type>Floating Point</type>
+ <CPUID>SSE2</CPUID>
+ <category>Swizzle</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Unpack and interleave double-precision (64-bit) floating-point elements from the high half of "a" and "b", and store the results in "dst".</description>
+ <operation>
+DEFINE INTERLEAVE_HIGH_QWORDS(src1[127:0], src2[127:0]) {
+ dst[63:0] := src1[127:64]
+ dst[127:64] := src2[127:64]
+ RETURN dst[127:0]
+}
+dst[127:0] := INTERLEAVE_HIGH_QWORDS(a[127:0], b[127:0])
+ </operation>
+ <instruction name="UNPCKHPD" form="xmm, xmm" xed="UNPCKHPD_XMMpd_XMMq"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_unpacklo_pd">
+ <type>Floating Point</type>
+ <CPUID>SSE2</CPUID>
+ <category>Swizzle</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Unpack and interleave double-precision (64-bit) floating-point elements from the low half of "a" and "b", and store the results in "dst".</description>
+ <operation>
+DEFINE INTERLEAVE_QWORDS(src1[127:0], src2[127:0]) {
+ dst[63:0] := src1[63:0]
+ dst[127:64] := src2[63:0]
+ RETURN dst[127:0]
+}
+dst[127:0] := INTERLEAVE_QWORDS(a[127:0], b[127:0])
+ </operation>
+ <instruction name="UNPCKLPD" form="xmm, xmm" xed="UNPCKLPD_XMMpd_XMMq"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_movemask_pd">
+ <type>Floating Point</type>
+ <CPUID>SSE2</CPUID>
+ <category>Miscellaneous</category>
+ <return type="int" varname="dst" etype="MASK"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Set each bit of mask "dst" based on the most significant bit of the corresponding packed double-precision (64-bit) floating-point element in "a".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF a[i+63]
+ dst[j] := 1
+ ELSE
+ dst[j] := 0
+ FI
+ENDFOR
+dst[MAX:2] := 0
+ </operation>
+ <instruction name="MOVMSKPD" form="r32, xmm" xed="MOVMSKPD_GPR32_XMMpd"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_shuffle_pd">
+ <type>Floating Point</type>
+ <CPUID>SSE2</CPUID>
+ <category>Swizzle</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="2"/>
+ <description>Shuffle double-precision (64-bit) floating-point elements using the control in "imm8", and store the results in "dst".</description>
+ <operation>
+dst[63:0] := (imm8[0] == 0) ? a[63:0] : a[127:64]
+dst[127:64] := (imm8[1] == 0) ? b[63:0] : b[127:64]
+ </operation>
+ <instruction name="SHUFPD" form="xmm, xmm, imm8" xed="SHUFPD_XMMpd_XMMpd_IMMb"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" vexEq="TRUE" name="_mm_move_sd">
+ <type>Floating Point</type>
+ <CPUID>SSE2</CPUID>
+ <category>Move</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Move the lower double-precision (64-bit) floating-point element from "b" to the lower element of "dst", and copy the upper element from "a" to the upper element of "dst".</description>
+ <operation>
+dst[63:0] := b[63:0]
+dst[127:64] := a[127:64]
+ </operation>
+ <instruction name="MOVSD" form="xmm, xmm" xed="MOVSD_XMM_XMMsd_XMMsd_0F10"/>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" name="_mm_castpd_ps">
+ <type>Floating Point</type>
+ <CPUID>SSE2</CPUID>
+ <category>Cast</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Cast vector of type __m128d to type __m128. This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency.</description>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" name="_mm_castpd_si128">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Cast</category>
+ <return type="__m128i" varname="dst" etype="M128"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Cast vector of type __m128d to type __m128i. This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency.</description>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" name="_mm_castps_pd">
+ <type>Floating Point</type>
+ <CPUID>SSE2</CPUID>
+ <category>Cast</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Cast vector of type __m128 to type __m128d. This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency.</description>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" name="_mm_castps_si128">
+ <type>Floating Point</type>
+ <type>Integer</type>
+ <CPUID>SSE2</CPUID>
+ <category>Cast</category>
+ <return type="__m128i" varname="dst" etype="M128"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Cast vector of type __m128 to type __m128i. This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency.</description>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" name="_mm_castsi128_pd">
+ <type>Floating Point</type>
+ <CPUID>SSE2</CPUID>
+ <category>Cast</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <description>Cast vector of type __m128i to type __m128d. This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency.</description>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE2" name="_mm_castsi128_ps">
+ <type>Floating Point</type>
+ <CPUID>SSE2</CPUID>
+ <category>Cast</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <description>Cast vector of type __m128i to type __m128. This intrinsic is only used for compilation and does not generate any instructions, thus it has zero latency.</description>
+ <header>emmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE3" vexEq="TRUE" name="_mm_addsub_ps">
+ <type>Floating Point</type>
+ <CPUID>SSE3</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Alternatively add and subtract packed single-precision (32-bit) floating-point elements in "a" to/from packed elements in "b", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF ((j &amp; 1) == 0)
+ dst[i+31:i] := a[i+31:i] - b[i+31:i]
+ ELSE
+ dst[i+31:i] := a[i+31:i] + b[i+31:i]
+ FI
+ENDFOR
+ </operation>
+ <instruction name="ADDSUBPS" form="xmm, xmm" xed="ADDSUBPS_XMMps_XMMps"/>
+ <header>pmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE3" vexEq="TRUE" name="_mm_addsub_pd">
+ <type>Floating Point</type>
+ <CPUID>SSE3</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Alternatively add and subtract packed double-precision (64-bit) floating-point elements in "a" to/from packed elements in "b", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF ((j &amp; 1) == 0)
+ dst[i+63:i] := a[i+63:i] - b[i+63:i]
+ ELSE
+ dst[i+63:i] := a[i+63:i] + b[i+63:i]
+ FI
+ENDFOR
+ </operation>
+ <instruction name="ADDSUBPD" form="xmm, xmm" xed="ADDSUBPD_XMMpd_XMMpd"/>
+ <header>pmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE3" vexEq="TRUE" name="_mm_hadd_pd">
+ <type>Floating Point</type>
+ <CPUID>SSE3</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Horizontally add adjacent pairs of double-precision (64-bit) floating-point elements in "a" and "b", and pack the results in "dst".</description>
+ <operation>
+dst[63:0] := a[127:64] + a[63:0]
+dst[127:64] := b[127:64] + b[63:0]
+ </operation>
+ <instruction name="HADDPD" form="xmm, xmm" xed="HADDPD_XMMpd_XMMpd"/>
+ <header>pmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE3" vexEq="TRUE" name="_mm_hadd_ps">
+ <type>Floating Point</type>
+ <CPUID>SSE3</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Horizontally add adjacent pairs of single-precision (32-bit) floating-point elements in "a" and "b", and pack the results in "dst".</description>
+ <operation>
+dst[31:0] := a[63:32] + a[31:0]
+dst[63:32] := a[127:96] + a[95:64]
+dst[95:64] := b[63:32] + b[31:0]
+dst[127:96] := b[127:96] + b[95:64]
+ </operation>
+ <instruction name="HADDPS" form="xmm, xmm" xed="HADDPS_XMMps_XMMps"/>
+ <header>pmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE3" vexEq="TRUE" name="_mm_hsub_pd">
+ <type>Floating Point</type>
+ <CPUID>SSE3</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Horizontally subtract adjacent pairs of double-precision (64-bit) floating-point elements in "a" and "b", and pack the results in "dst".</description>
+ <operation>
+dst[63:0] := a[63:0] - a[127:64]
+dst[127:64] := b[63:0] - b[127:64]
+ </operation>
+ <instruction name="HSUBPD" form="xmm, xmm" xed="HSUBPD_XMMpd_XMMpd"/>
+ <header>pmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE3" vexEq="TRUE" name="_mm_hsub_ps">
+ <type>Floating Point</type>
+ <CPUID>SSE3</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Horizontally subtract adjacent pairs of single-precision (32-bit) floating-point elements in "a" and "b", and pack the results in "dst".</description>
+ <operation>
+dst[31:0] := a[31:0] - a[63:32]
+dst[63:32] := a[95:64] - a[127:96]
+dst[95:64] := b[31:0] - b[63:32]
+dst[127:96] := b[95:64] - b[127:96]
+ </operation>
+ <instruction name="HSUBPS" form="xmm, xmm" xed="HSUBPS_XMMps_XMMps"/>
+ <header>pmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE3" vexEq="TRUE" name="_mm_lddqu_si128">
+ <type>Integer</type>
+ <CPUID>SSE3</CPUID>
+ <category>Load</category>
+ <return type="__m128i" varname="dst" etype="M128"/>
+ <parameter type="__m128i const*" varname="mem_addr" etype="M128" memwidth="128"/>
+ <description>Load 128-bits of integer data from unaligned memory into "dst". This intrinsic may perform better than "_mm_loadu_si128" when the data crosses a cache line boundary.</description>
+ <operation>
+dst[127:0] := MEM[mem_addr+127:mem_addr]
+ </operation>
+ <instruction name="LDDQU" form="xmm, m128" xed="LDDQU_XMMpd_MEMdq"/>
+ <header>pmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE3" vexEq="TRUE" name="_mm_movedup_pd">
+ <type>Floating Point</type>
+ <CPUID>SSE3</CPUID>
+ <category>Move</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Duplicate the low double-precision (64-bit) floating-point element from "a", and store the results in "dst".</description>
+ <operation>
+dst[63:0] := a[63:0]
+dst[127:64] := a[63:0]
+ </operation>
+ <instruction name="MOVDDUP" form="xmm, xmm" xed="MOVDDUP_XMMdq_XMMq"/>
+ <header>pmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE3" vexEq="TRUE" name="_mm_loaddup_pd">
+ <type>Floating Point</type>
+ <CPUID>SSE3</CPUID>
+ <category>Load</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="double const*" varname="mem_addr" etype="FP64" memwidth="64"/>
+ <description>Load a double-precision (64-bit) floating-point element from memory into both elements of "dst".</description>
+ <operation>
+dst[63:0] := MEM[mem_addr+63:mem_addr]
+dst[127:64] := MEM[mem_addr+63:mem_addr]
+ </operation>
+ <instruction name="MOVDDUP" form="xmm, m64" xed="MOVDDUP_XMMdq_MEMq"/>
+ <header>pmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE3" vexEq="TRUE" name="_mm_movehdup_ps">
+ <type>Floating Point</type>
+ <CPUID>SSE3</CPUID>
+ <category>Move</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Duplicate odd-indexed single-precision (32-bit) floating-point elements from "a", and store the results in "dst".</description>
+ <operation>
+dst[31:0] := a[63:32]
+dst[63:32] := a[63:32]
+dst[95:64] := a[127:96]
+dst[127:96] := a[127:96]
+ </operation>
+ <instruction name="MOVSHDUP" form="xmm, xmm" xed="MOVSHDUP_XMMps_XMMps"/>
+ <header>pmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE3" vexEq="TRUE" name="_mm_moveldup_ps">
+ <type>Floating Point</type>
+ <CPUID>SSE3</CPUID>
+ <category>Move</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Duplicate even-indexed single-precision (32-bit) floating-point elements from "a", and store the results in "dst".</description>
+ <operation>
+dst[31:0] := a[31:0]
+dst[63:32] := a[31:0]
+dst[95:64] := a[95:64]
+dst[127:96] := a[95:64]
+ </operation>
+ <instruction name="MOVSLDUP" form="xmm, xmm" xed="MOVSLDUP_XMMps_XMMps"/>
+ <header>pmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE4.1" vexEq="TRUE" name="_mm_blend_pd">
+ <type>Floating Point</type>
+ <CPUID>SSE4.1</CPUID>
+ <category>Swizzle</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="2"/>
+ <description>Blend packed double-precision (64-bit) floating-point elements from "a" and "b" using control mask "imm8", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF imm8[j]
+ dst[i+63:i] := b[i+63:i]
+ ELSE
+ dst[i+63:i] := a[i+63:i]
+ FI
+ENDFOR
+ </operation>
+ <instruction name="BLENDPD" form="xmm, xmm, imm8" xed="BLENDPD_XMMdq_XMMdq_IMMb"/>
+ <header>smmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE4.1" vexEq="TRUE" name="_mm_blend_ps">
+ <type>Floating Point</type>
+ <CPUID>SSE4.1</CPUID>
+ <category>Swizzle</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="4"/>
+ <description>Blend packed single-precision (32-bit) floating-point elements from "a" and "b" using control mask "imm8", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF imm8[j]
+ dst[i+31:i] := b[i+31:i]
+ ELSE
+ dst[i+31:i] := a[i+31:i]
+ FI
+ENDFOR
+ </operation>
+ <instruction name="BLENDPS" form="xmm, xmm, imm8" xed="BLENDPS_XMMdq_XMMdq_IMMb"/>
+ <header>smmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE4.1" vexEq="TRUE" name="_mm_blendv_pd">
+ <type>Floating Point</type>
+ <CPUID>SSE4.1</CPUID>
+ <category>Swizzle</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="__m128d" varname="mask" etype="FP64"/>
+ <description>Blend packed double-precision (64-bit) floating-point elements from "a" and "b" using "mask", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ IF mask[i+63]
+ dst[i+63:i] := b[i+63:i]
+ ELSE
+ dst[i+63:i] := a[i+63:i]
+ FI
+ENDFOR
+ </operation>
+ <instruction name="BLENDVPD" form="xmm, xmm" xed="BLENDVPD_XMMdq_XMMdq"/>
+ <header>smmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE4.1" vexEq="TRUE" name="_mm_blendv_ps">
+ <type>Floating Point</type>
+ <CPUID>SSE4.1</CPUID>
+ <category>Swizzle</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="__m128" varname="mask" etype="FP32"/>
+ <description>Blend packed single-precision (32-bit) floating-point elements from "a" and "b" using "mask", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF mask[i+31]
+ dst[i+31:i] := b[i+31:i]
+ ELSE
+ dst[i+31:i] := a[i+31:i]
+ FI
+ENDFOR
+ </operation>
+ <instruction name="BLENDVPS" form="xmm, xmm" xed="BLENDVPS_XMMdq_XMMdq"/>
+ <header>smmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE4.1" vexEq="TRUE" name="_mm_blendv_epi8">
+ <type>Integer</type>
+ <CPUID>SSE4.1</CPUID>
+ <category>Swizzle</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
+ <parameter type="__m128i" varname="b" etype="UI8"/>
+ <parameter type="__m128i" varname="mask" etype="UI8"/>
+ <description>Blend packed 8-bit integers from "a" and "b" using "mask", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*8
+ IF mask[i+7]
+ dst[i+7:i] := b[i+7:i]
+ ELSE
+ dst[i+7:i] := a[i+7:i]
+ FI
+ENDFOR
+ </operation>
+ <instruction name="PBLENDVB" form="xmm, xmm" xed="PBLENDVB_XMMdq_XMMdq"/>
+ <header>smmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE4.1" vexEq="TRUE" name="_mm_blend_epi16">
+ <type>Integer</type>
+ <CPUID>SSE4.1</CPUID>
+ <category>Swizzle</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <parameter type="__m128i" varname="b" etype="UI16"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Blend packed 16-bit integers from "a" and "b" using control mask "imm8", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ IF imm8[j]
+ dst[i+15:i] := b[i+15:i]
+ ELSE
+ dst[i+15:i] := a[i+15:i]
+ FI
+ENDFOR
+ </operation>
+ <instruction name="PBLENDW" form="xmm, xmm, imm8" xed="PBLENDW_XMMdq_XMMdq_IMMb"/>
+ <header>smmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE4.1" vexEq="TRUE" name="_mm_dp_pd">
+ <type>Floating Point</type>
+ <CPUID>SSE4.1</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Conditionally multiply the packed double-precision (64-bit) floating-point elements in "a" and "b" using the high 4 bits in "imm8", sum the four products, and conditionally store the sum in "dst" using the low 4 bits of "imm8".</description>
+ <operation>
+DEFINE DP(a[127:0], b[127:0], imm8[7:0]) {
+ FOR j := 0 to 1
+ i := j*64
+ IF imm8[(4+j)%8]
+ temp[i+63:i] := a[i+63:i] * b[i+63:i]
+ ELSE
+ temp[i+63:i] := 0.0
+ FI
+ ENDFOR
+
+ sum[63:0] := temp[127:64] + temp[63:0]
+
+ FOR j := 0 to 1
+ i := j*64
+ IF imm8[j%8]
+ tmpdst[i+63:i] := sum[63:0]
+ ELSE
+ tmpdst[i+63:i] := 0.0
+ FI
+ ENDFOR
+ RETURN tmpdst[127:0]
+}
+dst[127:0] := DP(a[127:0], b[127:0], imm8[7:0])
+ </operation>
+ <instruction name="DPPD" form="xmm, xmm, imm8" xed="DPPD_XMMdq_XMMdq_IMMb"/>
+ <header>smmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE4.1" vexEq="TRUE" name="_mm_dp_ps">
+ <type>Floating Point</type>
+ <CPUID>SSE4.1</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Conditionally multiply the packed single-precision (32-bit) floating-point elements in "a" and "b" using the high 4 bits in "imm8", sum the four products, and conditionally store the sum in "dst" using the low 4 bits of "imm8".</description>
+ <operation>
+DEFINE DP(a[127:0], b[127:0], imm8[7:0]) {
+ FOR j := 0 to 3
+ i := j*32
+ IF imm8[(4+j)%8]
+ temp[i+31:i] := a[i+31:i] * b[i+31:i]
+ ELSE
+ temp[i+31:i] := 0
+ FI
+ ENDFOR
+
+ sum[31:0] := (temp[127:96] + temp[95:64]) + (temp[63:32] + temp[31:0])
+
+ FOR j := 0 to 3
+ i := j*32
+ IF imm8[j%8]
+ tmpdst[i+31:i] := sum[31:0]
+ ELSE
+ tmpdst[i+31:i] := 0
+ FI
+ ENDFOR
+ RETURN tmpdst[127:0]
+}
+dst[127:0] := DP(a[127:0], b[127:0], imm8[7:0])
+ </operation>
+ <instruction name="DPPS" form="xmm, xmm, imm8" xed="DPPS_XMMdq_XMMdq_IMMb"/>
+ <header>smmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE4.1" vexEq="TRUE" name="_mm_extract_ps">
+ <type>Floating Point</type>
+ <CPUID>SSE4.1</CPUID>
+ <category>Swizzle</category>
+ <return type="int" varname="dst" etype="UI32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="2"/>
+ <description>Extract a single-precision (32-bit) floating-point element from "a", selected with "imm8", and store the result in "dst".</description>
+ <operation>
+dst[31:0] := (a[127:0] &gt;&gt; (imm8[1:0] * 32))[31:0]
+ </operation>
+ <instruction name="EXTRACTPS" form="r32, xmm, imm8" xed="EXTRACTPS_GPR32d_XMMdq_IMMb"/>
+ <header>smmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE4.1" vexEq="TRUE" name="_mm_extract_epi8">
+ <type>Integer</type>
+ <CPUID>SSE4.1</CPUID>
+ <category>Swizzle</category>
+ <return type="int" varname="dst" etype="UI8"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="4"/>
+ <description>Extract an 8-bit integer from "a", selected with "imm8", and store the result in the lower element of "dst".</description>
+ <operation>
+dst[7:0] := (a[127:0] &gt;&gt; (imm8[3:0] * 8))[7:0]
+dst[31:8] := 0
+ </operation>
+ <instruction name="PEXTRB" form="r32, xmm, imm8" xed="PEXTRB_GPR32d_XMMdq_IMMb"/>
+ <header>smmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE4.1" vexEq="TRUE" name="_mm_extract_epi32">
+ <type>Integer</type>
+ <CPUID>SSE4.1</CPUID>
+ <category>Swizzle</category>
+ <return type="int" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="2"/>
+ <description>Extract a 32-bit integer from "a", selected with "imm8", and store the result in "dst".</description>
+ <operation>
+dst[31:0] := (a[127:0] &gt;&gt; (imm8[1:0] * 32))[31:0]
+ </operation>
+ <instruction name="PEXTRD" form="r32, xmm, imm8" xed="PEXTRD_GPR32d_XMMdq_IMMb"/>
+ <header>smmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE4.1" vexEq="TRUE" name="_mm_extract_epi64">
+ <type>Integer</type>
+ <CPUID>SSE4.1</CPUID>
+ <category>Swizzle</category>
+ <return type="__int64" varname="dst" etype="UI64"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="1"/>
+ <description>Extract a 64-bit integer from "a", selected with "imm8", and store the result in "dst".</description>
+ <operation>
+dst[63:0] := (a[127:0] &gt;&gt; (imm8[0] * 64))[63:0]
+ </operation>
+ <instruction name="PEXTRQ" form="r64, xmm, imm8" xed="PEXTRQ_GPR64q_XMMdq_IMMb"/>
+ <header>smmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE4.1" vexEq="TRUE" name="_mm_insert_ps">
+ <type>Floating Point</type>
+ <CPUID>SSE4.1</CPUID>
+ <category>Swizzle</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Copy "a" to "tmp", then insert a single-precision (32-bit) floating-point element from "b" into "tmp" using the control in "imm8". Store "tmp" to "dst" using the mask in "imm8" (elements are zeroed out when the corresponding bit is set).</description>
+ <operation>
+tmp2[127:0] := a[127:0]
+CASE (imm8[7:6]) OF
+0: tmp1[31:0] := b[31:0]
+1: tmp1[31:0] := b[63:32]
+2: tmp1[31:0] := b[95:64]
+3: tmp1[31:0] := b[127:96]
+ESAC
+CASE (imm8[5:4]) OF
+0: tmp2[31:0] := tmp1[31:0]
+1: tmp2[63:32] := tmp1[31:0]
+2: tmp2[95:64] := tmp1[31:0]
+3: tmp2[127:96] := tmp1[31:0]
+ESAC
+FOR j := 0 to 3
+ i := j*32
+ IF imm8[j%8]
+ dst[i+31:i] := 0
+ ELSE
+ dst[i+31:i] := tmp2[i+31:i]
+ FI
+ENDFOR
+ </operation>
+ <instruction name="INSERTPS" form="xmm, xmm, imm8" xed="INSERTPS_XMMps_XMMps_IMMb"/>
+ <header>smmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE4.1" vexEq="TRUE" name="_mm_insert_epi8">
+ <type>Integer</type>
+ <CPUID>SSE4.1</CPUID>
+ <category>Swizzle</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
+ <parameter type="int" varname="i" etype="UI8"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="4"/>
+ <description>Copy "a" to "dst", and insert the lower 8-bit integer from "i" into "dst" at the location specified by "imm8".</description>
+ <operation>
+dst[127:0] := a[127:0]
+sel := imm8[3:0]*8
+dst[sel+7:sel] := i[7:0]
+ </operation>
+ <instruction name="PINSRB" form="xmm, r32, imm8" xed="PINSRB_XMMdq_GPR32d_IMMb"/>
+ <header>smmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE4.1" vexEq="TRUE" name="_mm_insert_epi32">
+ <type>Integer</type>
+ <CPUID>SSE4.1</CPUID>
+ <category>Swizzle</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="int" varname="i" etype="UI32"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="2"/>
+ <description>Copy "a" to "dst", and insert the 32-bit integer "i" into "dst" at the location specified by "imm8".</description>
+ <operation>
+dst[127:0] := a[127:0]
+sel := imm8[1:0]*32
+dst[sel+31:sel] := i[31:0]
+ </operation>
+ <instruction name="PINSRD" form="xmm, r32, imm8" xed="PINSRD_XMMdq_GPR32d_IMMb"/>
+ <header>smmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE4.1" vexEq="TRUE" name="_mm_insert_epi64">
+ <type>Integer</type>
+ <CPUID>SSE4.1</CPUID>
+ <category>Swizzle</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <parameter type="__int64" varname="i" etype="UI64"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="1"/>
+ <description>Copy "a" to "dst", and insert the 64-bit integer "i" into "dst" at the location specified by "imm8".</description>
+ <operation>
+dst[127:0] := a[127:0]
+sel := imm8[0]*64
+dst[sel+63:sel] := i[63:0]
+ </operation>
+ <instruction name="PINSRQ" form="xmm, r64, imm8" xed="PINSRQ_XMMdq_GPR64q_IMMb"/>
+ <header>smmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE4.1" vexEq="TRUE" name="_mm_max_epi8">
+ <type>Integer</type>
+ <CPUID>SSE4.1</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__m128i" varname="a" etype="SI8"/>
+ <parameter type="__m128i" varname="b" etype="SI8"/>
+ <description>Compare packed signed 8-bit integers in "a" and "b", and store packed maximum values in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*8
+ dst[i+7:i] := MAX(a[i+7:i], b[i+7:i])
+ENDFOR
+ </operation>
+ <instruction name="PMAXSB" form="xmm, xmm" xed="PMAXSB_XMMdq_XMMdq"/>
+ <header>smmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE4.1" vexEq="TRUE" name="_mm_max_epi32">
+ <type>Integer</type>
+ <CPUID>SSE4.1</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="a" etype="SI32"/>
+ <parameter type="__m128i" varname="b" etype="SI32"/>
+ <description>Compare packed signed 32-bit integers in "a" and "b", and store packed maximum values in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ dst[i+31:i] := MAX(a[i+31:i], b[i+31:i])
+ENDFOR
+ </operation>
+ <instruction name="PMAXSD" form="xmm, xmm" xed="PMAXSD_XMMdq_XMMdq"/>
+ <header>smmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE4.1" vexEq="TRUE" name="_mm_max_epu32">
+ <type>Integer</type>
+ <CPUID>SSE4.1</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="b" etype="UI32"/>
+ <description>Compare packed unsigned 32-bit integers in "a" and "b", and store packed maximum values in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ dst[i+31:i] := MAX(a[i+31:i], b[i+31:i])
+ENDFOR
+ </operation>
+ <instruction name="PMAXUD" form="xmm, xmm" xed="PMAXUD_XMMdq_XMMdq"/>
+ <header>smmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE4.1" vexEq="TRUE" name="_mm_max_epu16">
+ <type>Integer</type>
+ <CPUID>SSE4.1</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <parameter type="__m128i" varname="b" etype="UI16"/>
+ <description>Compare packed unsigned 16-bit integers in "a" and "b", and store packed maximum values in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ dst[i+15:i] := MAX(a[i+15:i], b[i+15:i])
+ENDFOR
+ </operation>
+ <instruction name="PMAXUW" form="xmm, xmm" xed="PMAXUW_XMMdq_XMMdq"/>
+ <header>smmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE4.1" vexEq="TRUE" name="_mm_min_epi8">
+ <type>Integer</type>
+ <CPUID>SSE4.1</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__m128i" varname="a" etype="SI8"/>
+ <parameter type="__m128i" varname="b" etype="SI8"/>
+ <description>Compare packed signed 8-bit integers in "a" and "b", and store packed minimum values in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*8
+ dst[i+7:i] := MIN(a[i+7:i], b[i+7:i])
+ENDFOR
+ </operation>
+ <instruction name="PMINSB" form="xmm, xmm" xed="PMINSB_XMMdq_XMMdq"/>
+ <header>smmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE4.1" vexEq="TRUE" name="_mm_min_epi32">
+ <type>Integer</type>
+ <CPUID>SSE4.1</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="a" etype="SI32"/>
+ <parameter type="__m128i" varname="b" etype="SI32"/>
+ <description>Compare packed signed 32-bit integers in "a" and "b", and store packed minimum values in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ dst[i+31:i] := MIN(a[i+31:i], b[i+31:i])
+ENDFOR
+ </operation>
+ <instruction name="PMINSD" form="xmm, xmm" xed="PMINSD_XMMdq_XMMdq"/>
+ <header>smmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE4.1" vexEq="TRUE" name="_mm_min_epu32">
+ <type>Integer</type>
+ <CPUID>SSE4.1</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="b" etype="UI32"/>
+ <description>Compare packed unsigned 32-bit integers in "a" and "b", and store packed minimum values in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ dst[i+31:i] := MIN(a[i+31:i], b[i+31:i])
+ENDFOR
+ </operation>
+ <instruction name="PMINUD" form="xmm, xmm" xed="PMINUD_XMMdq_XMMdq"/>
+ <header>smmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE4.1" vexEq="TRUE" name="_mm_min_epu16">
+ <type>Integer</type>
+ <CPUID>SSE4.1</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <parameter type="__m128i" varname="b" etype="UI16"/>
+ <description>Compare packed unsigned 16-bit integers in "a" and "b", and store packed minimum values in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ dst[i+15:i] := MIN(a[i+15:i], b[i+15:i])
+ENDFOR
+ </operation>
+ <instruction name="PMINUW" form="xmm, xmm" xed="PMINUW_XMMdq_XMMdq"/>
+ <header>smmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE4.1" vexEq="TRUE" name="_mm_packus_epi32">
+ <type>Integer</type>
+ <CPUID>SSE4.1</CPUID>
+ <category>Convert</category>
+ <category>Miscellaneous</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__m128i" varname="a" etype="SI32"/>
+ <parameter type="__m128i" varname="b" etype="SI32"/>
+ <description>Convert packed signed 32-bit integers from "a" and "b" to packed 16-bit integers using unsigned saturation, and store the results in "dst".</description>
+ <operation>
+dst[15:0] := SaturateU16(a[31:0])
+dst[31:16] := SaturateU16(a[63:32])
+dst[47:32] := SaturateU16(a[95:64])
+dst[63:48] := SaturateU16(a[127:96])
+dst[79:64] := SaturateU16(b[31:0])
+dst[95:80] := SaturateU16(b[63:32])
+dst[111:96] := SaturateU16(b[95:64])
+dst[127:112] := SaturateU16(b[127:96])
+ </operation>
+ <instruction name="PACKUSDW" form="xmm, xmm" xed="PACKUSDW_XMMdq_XMMdq"/>
+ <header>smmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE4.1" vexEq="TRUE" name="_mm_cmpeq_epi64">
+ <type>Integer</type>
+ <CPUID>SSE4.1</CPUID>
+ <category>Compare</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m128i" varname="a" etype="UI64"/>
+ <parameter type="__m128i" varname="b" etype="UI64"/>
+ <description>Compare packed 64-bit integers in "a" and "b" for equality, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ dst[i+63:i] := ( a[i+63:i] == b[i+63:i] ) ? 0xFFFFFFFFFFFFFFFF : 0
+ENDFOR
+ </operation>
+ <instruction name="PCMPEQQ" form="xmm, xmm" xed="PCMPEQQ_XMMdq_XMMdq"/>
+ <header>smmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE4.1" vexEq="TRUE" name="_mm_cvtepi8_epi16">
+ <type>Integer</type>
+ <CPUID>SSE4.1</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="SI16"/>
+ <parameter type="__m128i" varname="a" etype="SI8"/>
+ <description>Sign extend packed 8-bit integers in "a" to packed 16-bit integers, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*8
+ l := j*16
+ dst[l+15:l] := SignExtend16(a[i+7:i])
+ENDFOR
+ </operation>
+ <instruction name="PMOVSXBW" form="xmm, xmm" xed="PMOVSXBW_XMMdq_XMMq"/>
+ <header>smmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE4.1" vexEq="TRUE" name="_mm_cvtepi8_epi32">
+ <type>Integer</type>
+ <CPUID>SSE4.1</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="SI32"/>
+ <parameter type="__m128i" varname="a" etype="SI8"/>
+ <description>Sign extend packed 8-bit integers in "a" to packed 32-bit integers, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := 32*j
+ k := 8*j
+ dst[i+31:i] := SignExtend32(a[k+7:k])
+ENDFOR
+ </operation>
+ <instruction name="PMOVSXBD" form="xmm, xmm" xed="PMOVSXBD_XMMdq_XMMd"/>
+ <header>smmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE4.1" vexEq="TRUE" name="_mm_cvtepi8_epi64">
+ <type>Integer</type>
+ <CPUID>SSE4.1</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="SI64"/>
+ <parameter type="__m128i" varname="a" etype="SI8"/>
+ <description>Sign extend packed 8-bit integers in the low 8 bytes of "a" to packed 64-bit integers, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := 64*j
+ k := 8*j
+ dst[i+63:i] := SignExtend64(a[k+7:k])
+ENDFOR
+ </operation>
+ <instruction name="PMOVSXBQ" form="xmm, xmm" xed="PMOVSXBQ_XMMdq_XMMw"/>
+ <header>smmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE4.1" vexEq="TRUE" name="_mm_cvtepi16_epi32">
+ <type>Integer</type>
+ <CPUID>SSE4.1</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="SI32"/>
+ <parameter type="__m128i" varname="a" etype="SI16"/>
+ <description>Sign extend packed 16-bit integers in "a" to packed 32-bit integers, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := 32*j
+ k := 16*j
+ dst[i+31:i] := SignExtend32(a[k+15:k])
+ENDFOR
+ </operation>
+ <instruction name="PMOVSXWD" form="xmm, xmm" xed="PMOVSXWD_XMMdq_XMMq"/>
+ <header>smmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE4.1" vexEq="TRUE" name="_mm_cvtepi16_epi64">
+ <type>Integer</type>
+ <CPUID>SSE4.1</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="SI64"/>
+ <parameter type="__m128i" varname="a" etype="SI16"/>
+ <description>Sign extend packed 16-bit integers in "a" to packed 64-bit integers, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := 64*j
+ k := 16*j
+ dst[i+63:i] := SignExtend64(a[k+15:k])
+ENDFOR
+ </operation>
+ <instruction name="PMOVSXWQ" form="xmm, xmm" xed="PMOVSXWQ_XMMdq_XMMd"/>
+ <header>smmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE4.1" vexEq="TRUE" name="_mm_cvtepi32_epi64">
+ <type>Integer</type>
+ <CPUID>SSE4.1</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="SI64"/>
+ <parameter type="__m128i" varname="a" etype="SI32"/>
+ <description>Sign extend packed 32-bit integers in "a" to packed 64-bit integers, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := 64*j
+ k := 32*j
+ dst[i+63:i] := SignExtend64(a[k+31:k])
+ENDFOR
+ </operation>
+ <instruction name="PMOVSXDQ" form="xmm, xmm" xed="PMOVSXDQ_XMMdq_XMMq"/>
+ <header>smmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE4.1" vexEq="TRUE" name="_mm_cvtepu8_epi16">
+ <type>Integer</type>
+ <CPUID>SSE4.1</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
+ <description>Zero extend packed unsigned 8-bit integers in "a" to packed 16-bit integers, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*8
+ l := j*16
+ dst[l+15:l] := ZeroExtend16(a[i+7:i])
+ENDFOR
+ </operation>
+ <instruction name="PMOVZXBW" form="xmm, xmm" xed="PMOVZXBW_XMMdq_XMMq"/>
+ <header>smmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE4.1" vexEq="TRUE" name="_mm_cvtepu8_epi32">
+ <type>Integer</type>
+ <CPUID>SSE4.1</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
+ <description>Zero extend packed unsigned 8-bit integers in "a" to packed 32-bit integers, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := 32*j
+ k := 8*j
+ dst[i+31:i] := ZeroExtend32(a[k+7:k])
+ENDFOR
+ </operation>
+ <instruction name="PMOVZXBD" form="xmm, xmm" xed="PMOVZXBD_XMMdq_XMMd"/>
+ <header>smmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE4.1" vexEq="TRUE" name="_mm_cvtepu8_epi64">
+ <type>Integer</type>
+ <CPUID>SSE4.1</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
+	<description>Zero extend packed unsigned 8-bit integers in the low 2 bytes of "a" to packed 64-bit integers, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := 64*j
+ k := 8*j
+ dst[i+63:i] := ZeroExtend64(a[k+7:k])
+ENDFOR
+ </operation>
+ <instruction name="PMOVZXBQ" form="xmm, xmm" xed="PMOVZXBQ_XMMdq_XMMw"/>
+ <header>smmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE4.1" vexEq="TRUE" name="_mm_cvtepu16_epi32">
+ <type>Integer</type>
+ <CPUID>SSE4.1</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <description>Zero extend packed unsigned 16-bit integers in "a" to packed 32-bit integers, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := 32*j
+ k := 16*j
+ dst[i+31:i] := ZeroExtend32(a[k+15:k])
+ENDFOR
+ </operation>
+ <instruction name="PMOVZXWD" form="xmm, xmm" xed="PMOVZXWD_XMMdq_XMMq"/>
+ <header>smmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE4.1" vexEq="TRUE" name="_mm_cvtepu16_epi64">
+ <type>Integer</type>
+ <CPUID>SSE4.1</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <description>Zero extend packed unsigned 16-bit integers in "a" to packed 64-bit integers, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := 64*j
+ k := 16*j
+ dst[i+63:i] := ZeroExtend64(a[k+15:k])
+ENDFOR
+ </operation>
+ <instruction name="PMOVZXWQ" form="xmm, xmm" xed="PMOVZXWQ_XMMdq_XMMd"/>
+ <header>smmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE4.1" vexEq="TRUE" name="_mm_cvtepu32_epi64">
+ <type>Integer</type>
+ <CPUID>SSE4.1</CPUID>
+ <category>Convert</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <description>Zero extend packed unsigned 32-bit integers in "a" to packed 64-bit integers, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := 64*j
+ k := 32*j
+ dst[i+63:i] := ZeroExtend64(a[k+31:k])
+ENDFOR
+ </operation>
+ <instruction name="PMOVZXDQ" form="xmm, xmm" xed="PMOVZXDQ_XMMdq_XMMq"/>
+ <header>smmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE4.1" vexEq="TRUE" name="_mm_mul_epi32">
+ <type>Integer</type>
+ <CPUID>SSE4.1</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="SI64"/>
+ <parameter type="__m128i" varname="a" etype="SI32"/>
+ <parameter type="__m128i" varname="b" etype="SI32"/>
+ <description>Multiply the low signed 32-bit integers from each packed 64-bit element in "a" and "b", and store the signed 64-bit results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ dst[i+63:i] := SignExtend64(a[i+31:i]) * SignExtend64(b[i+31:i])
+ENDFOR
+ </operation>
+ <instruction name="PMULDQ" form="xmm, xmm" xed="PMULDQ_XMMdq_XMMdq"/>
+ <header>smmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE4.1" vexEq="TRUE" name="_mm_mullo_epi32">
+ <type>Integer</type>
+ <CPUID>SSE4.1</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="a" etype="UI32"/>
+ <parameter type="__m128i" varname="b" etype="UI32"/>
+ <description>Multiply the packed 32-bit integers in "a" and "b", producing intermediate 64-bit integers, and store the low 32 bits of the intermediate integers in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ tmp[63:0] := a[i+31:i] * b[i+31:i]
+ dst[i+31:i] := tmp[31:0]
+ENDFOR
+ </operation>
+ <instruction name="PMULLD" form="xmm, xmm" xed="PMULLD_XMMdq_XMMdq"/>
+ <header>smmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE4.1" vexEq="TRUE" name="_mm_testz_si128">
+ <type>Integer</type>
+ <type>Flag</type>
+ <CPUID>SSE4.1</CPUID>
+ <category>Logical</category>
+ <return type="int" varname="k" etype="UI32"/>
+ <parameter type="__m128i" varname="a" etype="M128"/>
+ <parameter type="__m128i" varname="b" etype="M128"/>
+ <description>Compute the bitwise AND of 128 bits (representing integer data) in "a" and "b", and set "ZF" to 1 if the result is zero, otherwise set "ZF" to 0. Compute the bitwise NOT of "a" and then AND with "b", and set "CF" to 1 if the result is zero, otherwise set "CF" to 0. Return the "ZF" value.</description>
+ <operation>
+IF ((a[127:0] AND b[127:0]) == 0)
+ ZF := 1
+ELSE
+ ZF := 0
+FI
+IF (((NOT a[127:0]) AND b[127:0]) == 0)
+ CF := 1
+ELSE
+ CF := 0
+FI
+RETURN ZF
+ </operation>
+ <instruction name="PTEST" form="xmm, xmm" xed="PTEST_XMMdq_XMMdq"/>
+ <header>smmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE4.1" vexEq="TRUE" name="_mm_testc_si128">
+ <type>Integer</type>
+ <type>Flag</type>
+ <CPUID>SSE4.1</CPUID>
+ <category>Logical</category>
+ <return type="int" varname="k" etype="UI32"/>
+ <parameter type="__m128i" varname="a" etype="M128"/>
+ <parameter type="__m128i" varname="b" etype="M128"/>
+ <description>Compute the bitwise AND of 128 bits (representing integer data) in "a" and "b", and set "ZF" to 1 if the result is zero, otherwise set "ZF" to 0. Compute the bitwise NOT of "a" and then AND with "b", and set "CF" to 1 if the result is zero, otherwise set "CF" to 0. Return the "CF" value.</description>
+ <operation>
+IF ((a[127:0] AND b[127:0]) == 0)
+ ZF := 1
+ELSE
+ ZF := 0
+FI
+IF (((NOT a[127:0]) AND b[127:0]) == 0)
+ CF := 1
+ELSE
+ CF := 0
+FI
+RETURN CF
+ </operation>
+ <instruction name="PTEST" form="xmm, xmm" xed="PTEST_XMMdq_XMMdq"/>
+ <header>smmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE4.1" vexEq="TRUE" name="_mm_testnzc_si128">
+ <type>Integer</type>
+ <type>Flag</type>
+ <CPUID>SSE4.1</CPUID>
+ <category>Logical</category>
+ <return type="int" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="a" etype="M128"/>
+ <parameter type="__m128i" varname="b" etype="M128"/>
+ <description>Compute the bitwise AND of 128 bits (representing integer data) in "a" and "b", and set "ZF" to 1 if the result is zero, otherwise set "ZF" to 0. Compute the bitwise NOT of "a" and then AND with "b", and set "CF" to 1 if the result is zero, otherwise set "CF" to 0. Return 1 if both the "ZF" and "CF" values are zero, otherwise return 0.</description>
+ <operation>
+IF ((a[127:0] AND b[127:0]) == 0)
+ ZF := 1
+ELSE
+ ZF := 0
+FI
+IF (((NOT a[127:0]) AND b[127:0]) == 0)
+ CF := 1
+ELSE
+ CF := 0
+FI
+IF (ZF == 0 &amp;&amp; CF == 0)
+ dst := 1
+ELSE
+ dst := 0
+FI
+ </operation>
+ <instruction name="PTEST" form="xmm, xmm" xed="PTEST_XMMdq_XMMdq"/>
+ <header>smmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE4.1" vexEq="TRUE" name="_mm_test_all_zeros">
+ <type>Integer</type>
+ <type>Flag</type>
+ <CPUID>SSE4.1</CPUID>
+ <category>Logical</category>
+ <return type="int" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="a" etype="M128"/>
+ <parameter type="__m128i" varname="mask" etype="M128"/>
+ <description>Compute the bitwise AND of 128 bits (representing integer data) in "a" and "mask", and return 1 if the result is zero, otherwise return 0.</description>
+ <operation>
+IF ((a[127:0] AND mask[127:0]) == 0)
+ ZF := 1
+ELSE
+ ZF := 0
+FI
+dst := ZF
+ </operation>
+ <instruction name="PTEST" form="xmm, xmm" xed="PTEST_XMMdq_XMMdq"/>
+ <header>smmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE4.1" vexEq="TRUE" name="_mm_test_mix_ones_zeros">
+ <type>Integer</type>
+ <type>Flag</type>
+ <CPUID>SSE4.1</CPUID>
+ <category>Logical</category>
+ <return type="int" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="a" etype="M128"/>
+ <parameter type="__m128i" varname="mask" etype="M128"/>
+ <description>Compute the bitwise AND of 128 bits (representing integer data) in "a" and "mask", and set "ZF" to 1 if the result is zero, otherwise set "ZF" to 0. Compute the bitwise NOT of "a" and then AND with "mask", and set "CF" to 1 if the result is zero, otherwise set "CF" to 0. Return 1 if both the "ZF" and "CF" values are zero, otherwise return 0.</description>
+ <operation>
+IF ((a[127:0] AND mask[127:0]) == 0)
+ ZF := 1
+ELSE
+ ZF := 0
+FI
+IF (((NOT a[127:0]) AND mask[127:0]) == 0)
+ CF := 1
+ELSE
+ CF := 0
+FI
+IF (ZF == 0 &amp;&amp; CF == 0)
+ dst := 1
+ELSE
+ dst := 0
+FI
+ </operation>
+ <instruction name="PTEST" form="xmm, xmm" xed="PTEST_XMMdq_XMMdq"/>
+ <header>smmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE4.1" vexEq="TRUE" sequence="TRUE" name="_mm_test_all_ones">
+ <type>Integer</type>
+ <type>Flag</type>
+ <CPUID>SSE4.1</CPUID>
+ <category>Logical</category>
+ <return type="int" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="a" etype="M128"/>
+ <description>Compute the bitwise NOT of "a" and then AND with a 128-bit vector containing all 1's, and return 1 if the result is zero, otherwise return 0.</description>
+ <operation>
+FOR j := 0 to 127
+ tmp[j] := 1
+ENDFOR
+IF (((NOT a[127:0]) AND tmp[127:0]) == 0)
+ CF := 1
+ELSE
+ CF := 0
+FI
+dst := CF
+ </operation>
+ <instruction name="PCMPEQD" form="xmm, xmm" xed="PCMPEQD_XMMdq_XMMdq"/>
+ <instruction name="PTEST" form="xmm, xmm" xed="PTEST_XMMdq_XMMdq"/>
+ <header>smmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE4.1" vexEq="TRUE" name="_mm_round_pd">
+ <type>Floating Point</type>
+ <CPUID>SSE4.1</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="int" varname="rounding" etype="IMM" immtype="_MM_FROUND"/>
+ <description>Round the packed double-precision (64-bit) floating-point elements in "a" using the "rounding" parameter, and store the results as packed double-precision floating-point elements in "dst".
+ [round_note]</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ dst[i+63:i] := ROUND(a[i+63:i], rounding)
+ENDFOR
+ </operation>
+ <instruction name="ROUNDPD" form="xmm, xmm, imm8" xed="ROUNDPD_XMMpd_XMMpd_IMMb"/>
+ <header>smmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE4.1" vexEq="TRUE" name="_mm_floor_pd">
+ <type>Floating Point</type>
+ <CPUID>SSE4.1</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Round the packed double-precision (64-bit) floating-point elements in "a" down to an integer value, and store the results as packed double-precision floating-point elements in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ dst[i+63:i] := FLOOR(a[i+63:i])
+ENDFOR
+ </operation>
+ <instruction name="ROUNDPD" form="xmm, xmm, imm8" xed="ROUNDPD_XMMpd_XMMpd_IMMb"/>
+ <header>smmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE4.1" vexEq="TRUE" name="_mm_ceil_pd">
+ <type>Floating Point</type>
+ <CPUID>SSE4.1</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <description>Round the packed double-precision (64-bit) floating-point elements in "a" up to an integer value, and store the results as packed double-precision floating-point elements in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ dst[i+63:i] := CEIL(a[i+63:i])
+ENDFOR
+ </operation>
+ <instruction name="ROUNDPD" form="xmm, xmm, imm8" xed="ROUNDPD_XMMpd_XMMpd_IMMb"/>
+ <header>smmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE4.1" vexEq="TRUE" name="_mm_round_ps">
+ <type>Floating Point</type>
+ <CPUID>SSE4.1</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="int" varname="rounding" etype="IMM" immtype="_MM_FROUND"/>
+ <description>Round the packed single-precision (32-bit) floating-point elements in "a" using the "rounding" parameter, and store the results as packed single-precision floating-point elements in "dst".
+ [round_note]</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ dst[i+31:i] := ROUND(a[i+31:i], rounding)
+ENDFOR
+ </operation>
+ <instruction name="ROUNDPS" form="xmm, xmm, imm8" xed="ROUNDPS_XMMps_XMMps_IMMb"/>
+ <header>smmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE4.1" vexEq="TRUE" name="_mm_floor_ps">
+ <type>Floating Point</type>
+ <CPUID>SSE4.1</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Round the packed single-precision (32-bit) floating-point elements in "a" down to an integer value, and store the results as packed single-precision floating-point elements in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ dst[i+31:i] := FLOOR(a[i+31:i])
+ENDFOR
+ </operation>
+ <instruction name="ROUNDPS" form="xmm, xmm, imm8" xed="ROUNDPS_XMMps_XMMps_IMMb"/>
+ <header>smmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE4.1" vexEq="TRUE" name="_mm_ceil_ps">
+ <type>Floating Point</type>
+ <CPUID>SSE4.1</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <description>Round the packed single-precision (32-bit) floating-point elements in "a" up to an integer value, and store the results as packed single-precision floating-point elements in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ dst[i+31:i] := CEIL(a[i+31:i])
+ENDFOR
+ </operation>
+ <instruction name="ROUNDPS" form="xmm, xmm, imm8" xed="ROUNDPS_XMMps_XMMps_IMMb"/>
+ <header>smmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE4.1" vexEq="TRUE" name="_mm_round_sd">
+ <type>Floating Point</type>
+ <CPUID>SSE4.1</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <parameter type="int" varname="rounding" etype="IMM" immtype="_MM_FROUND"/>
+ <description>Round the lower double-precision (64-bit) floating-point element in "b" using the "rounding" parameter, store the result as a double-precision floating-point element in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst".
+ [round_note]</description>
+ <operation>
+dst[63:0] := ROUND(b[63:0], rounding)
+dst[127:64] := a[127:64]
+ </operation>
+ <instruction name="ROUNDSD" form="xmm, xmm, imm8" xed="ROUNDSD_XMMq_XMMq_IMMb"/>
+ <header>smmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE4.1" vexEq="TRUE" name="_mm_floor_sd">
+ <type>Floating Point</type>
+ <CPUID>SSE4.1</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Round the lower double-precision (64-bit) floating-point element in "b" down to an integer value, store the result as a double-precision floating-point element in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst".</description>
+ <operation>
+dst[63:0] := FLOOR(b[63:0])
+dst[127:64] := a[127:64]
+ </operation>
+ <instruction name="ROUNDSD" form="xmm, xmm, imm8" xed="ROUNDSD_XMMq_XMMq_IMMb"/>
+ <header>smmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE4.1" vexEq="TRUE" name="_mm_ceil_sd">
+ <type>Floating Point</type>
+ <CPUID>SSE4.1</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m128d" varname="dst" etype="FP64"/>
+ <parameter type="__m128d" varname="a" etype="FP64"/>
+ <parameter type="__m128d" varname="b" etype="FP64"/>
+ <description>Round the lower double-precision (64-bit) floating-point element in "b" up to an integer value, store the result as a double-precision floating-point element in the lower element of "dst", and copy the upper element from "a" to the upper element of "dst".</description>
+ <operation>
+dst[63:0] := CEIL(b[63:0])
+dst[127:64] := a[127:64]
+ </operation>
+ <instruction name="ROUNDSD" form="xmm, xmm, imm8" xed="ROUNDSD_XMMq_XMMq_IMMb"/>
+ <header>smmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE4.1" vexEq="TRUE" name="_mm_round_ss">
+ <type>Floating Point</type>
+ <CPUID>SSE4.1</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <parameter type="int" varname="rounding" etype="IMM" immtype="_MM_FROUND"/>
+ <description>Round the lower single-precision (32-bit) floating-point element in "b" using the "rounding" parameter, store the result as a single-precision floating-point element in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst".
+ [round_note]</description>
+ <operation>
+dst[31:0] := ROUND(b[31:0], rounding)
+dst[127:32] := a[127:32]
+ </operation>
+ <instruction name="ROUNDSS" form="xmm, xmm, imm8" xed="ROUNDSS_XMMd_XMMd_IMMb"/>
+ <header>smmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE4.1" vexEq="TRUE" name="_mm_floor_ss">
+ <type>Floating Point</type>
+ <CPUID>SSE4.1</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Round the lower single-precision (32-bit) floating-point element in "b" down to an integer value, store the result as a single-precision floating-point element in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst".</description>
+ <operation>
+dst[31:0] := FLOOR(b[31:0])
+dst[127:32] := a[127:32]
+ </operation>
+ <instruction name="ROUNDSS" form="xmm, xmm, imm8" xed="ROUNDSS_XMMd_XMMd_IMMb"/>
+ <header>smmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE4.1" vexEq="TRUE" name="_mm_ceil_ss">
+ <type>Floating Point</type>
+ <CPUID>SSE4.1</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m128" varname="dst" etype="FP32"/>
+ <parameter type="__m128" varname="a" etype="FP32"/>
+ <parameter type="__m128" varname="b" etype="FP32"/>
+ <description>Round the lower single-precision (32-bit) floating-point element in "b" up to an integer value, store the result as a single-precision floating-point element in the lower element of "dst", and copy the upper 3 packed elements from "a" to the upper elements of "dst".</description>
+ <operation>
+dst[31:0] := CEIL(b[31:0])
+dst[127:32] := a[127:32]
+ </operation>
+ <instruction name="ROUNDSS" form="xmm, xmm, imm8" xed="ROUNDSS_XMMd_XMMd_IMMb"/>
+ <header>smmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE4.1" vexEq="TRUE" name="_mm_minpos_epu16">
+ <type>Integer</type>
+ <CPUID>SSE4.1</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__m128i" varname="a" etype="UI16"/>
+ <description>Horizontally compute the minimum amongst the packed unsigned 16-bit integers in "a", store the minimum and index in "dst", and zero the remaining bits in "dst".</description>
+ <operation>
+index[2:0] := 0
+min[15:0] := a[15:0]
+FOR j := 0 to 7
+ i := j*16
+ IF a[i+15:i] &lt; min[15:0]
+ index[2:0] := j
+ min[15:0] := a[i+15:i]
+ FI
+ENDFOR
+dst[15:0] := min[15:0]
+dst[18:16] := index[2:0]
+dst[127:19] := 0
+ </operation>
+ <instruction name="PHMINPOSUW" form="xmm, xmm" xed="PHMINPOSUW_XMMdq_XMMdq"/>
+ <header>smmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE4.1" vexEq="TRUE" name="_mm_mpsadbw_epu8">
+ <type>Integer</type>
+ <CPUID>SSE4.1</CPUID>
+ <category>Arithmetic</category>
+ <category>Miscellaneous</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
+ <parameter type="__m128i" varname="b" etype="UI8"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Compute the sum of absolute differences (SADs) of quadruplets of unsigned 8-bit integers in "a" compared to those in "b", and store the 16-bit results in "dst".
+	Eight SADs are performed using one quadruplet from "b" and eight quadruplets from "a". One quadruplet is selected from "b" starting at the offset specified in "imm8". Eight quadruplets are formed from sequential 8-bit integers selected from "a" starting at the offset specified in "imm8".</description>
+ <operation>
+DEFINE MPSADBW(a[127:0], b[127:0], imm8[2:0]) {
+ a_offset := imm8[2]*32
+ b_offset := imm8[1:0]*32
+ FOR j := 0 to 7
+ i := j*8
+ k := a_offset+i
+ l := b_offset
+ tmp[i*2+15:i*2] := ABS(Signed(a[k+7:k] - b[l+7:l])) + ABS(Signed(a[k+15:k+8] - b[l+15:l+8])) + \
+ ABS(Signed(a[k+23:k+16] - b[l+23:l+16])) + ABS(Signed(a[k+31:k+24] - b[l+31:l+24]))
+ ENDFOR
+ RETURN tmp[127:0]
+}
+dst[127:0] := MPSADBW(a[127:0], b[127:0], imm8[2:0])
+ </operation>
+ <instruction name="MPSADBW" form="xmm, xmm, imm8" xed="MPSADBW_XMMdq_XMMdq_IMMb"/>
+ <header>smmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE4.1" vexEq="TRUE" name="_mm_stream_load_si128">
+ <type>Integer</type>
+ <CPUID>SSE4.1</CPUID>
+ <category>Load</category>
+ <return type="__m128i" varname="dst" etype="M128"/>
+ <parameter type="__m128i *" varname="mem_addr" etype="M128" memwidth="128"/>
+ <description>Load 128-bits of integer data from memory into "dst" using a non-temporal memory hint.
+ "mem_addr" must be aligned on a 16-byte boundary or a general-protection exception may be generated.</description>
+ <operation>
+dst[127:0] := MEM[mem_addr+127:mem_addr]
+ </operation>
+ <instruction name="MOVNTDQA" form="xmm, m128" xed="MOVNTDQA_XMMdq_MEMdq"/>
+ <header>smmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE4.2" vexEq="TRUE" name="_mm_cmpistrm">
+ <CPUID>SSE4.2</CPUID>
+ <category>String Compare</category>
+ <return type="__m128i" varname="dst" etype="M128"/>
+ <parameter type="__m128i" varname="a" etype="M128"/>
+ <parameter type="__m128i" varname="b" etype="M128"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Compare packed strings with implicit lengths in "a" and "b" using the control in "imm8", and store the generated mask in "dst".
+ [strcmp_note]</description>
+ <operation>
+size := (imm8[0] ? 16 : 8) // 8 or 16-bit characters
+UpperBound := (128 / size) - 1
+BoolRes := 0
+// compare all characters
+aInvalid := 0
+bInvalid := 0
+FOR i := 0 to UpperBound
+ m := i*size
+ FOR j := 0 to UpperBound
+ n := j*size
+ BoolRes.word[i].bit[j] := (a[m+size-1:m] == b[n+size-1:n]) ? 1 : 0
+
+ // invalidate characters after EOS
+ IF a[m+size-1:m] == 0
+ aInvalid := 1
+ FI
+ IF b[n+size-1:n] == 0
+ bInvalid := 1
+ FI
+
+ // override comparisons for invalid characters
+ CASE (imm8[3:2]) OF
+ 0: // equal any
+ IF (!aInvalid &amp;&amp; bInvalid)
+ BoolRes.word[i].bit[j] := 0
+ ELSE IF (aInvalid &amp;&amp; !bInvalid)
+ BoolRes.word[i].bit[j] := 0
+ ELSE IF (aInvalid &amp;&amp; bInvalid)
+ BoolRes.word[i].bit[j] := 0
+ FI
+ 1: // ranges
+ IF (!aInvalid &amp;&amp; bInvalid)
+ BoolRes.word[i].bit[j] := 0
+ ELSE IF (aInvalid &amp;&amp; !bInvalid)
+ BoolRes.word[i].bit[j] := 0
+ ELSE IF (aInvalid &amp;&amp; bInvalid)
+ BoolRes.word[i].bit[j] := 0
+ FI
+ 2: // equal each
+ IF (!aInvalid &amp;&amp; bInvalid)
+ BoolRes.word[i].bit[j] := 0
+ ELSE IF (aInvalid &amp;&amp; !bInvalid)
+ BoolRes.word[i].bit[j] := 0
+ ELSE IF (aInvalid &amp;&amp; bInvalid)
+ BoolRes.word[i].bit[j] := 1
+ FI
+ 3: // equal ordered
+ IF (!aInvalid &amp;&amp; bInvalid)
+ BoolRes.word[i].bit[j] := 0
+ ELSE IF (aInvalid &amp;&amp; !bInvalid)
+ BoolRes.word[i].bit[j] := 1
+ ELSE IF (aInvalid &amp;&amp; bInvalid)
+ BoolRes.word[i].bit[j] := 1
+ FI
+ ESAC
+ ENDFOR
+ENDFOR
+// aggregate results
+CASE (imm8[3:2]) OF
+0: // equal any
+ IntRes1 := 0
+ FOR i := 0 to UpperBound
+ FOR j := 0 to UpperBound
+ IntRes1[i] := IntRes1[i] OR BoolRes.word[i].bit[j]
+ ENDFOR
+ ENDFOR
+1: // ranges
+ IntRes1 := 0
+ FOR i := 0 to UpperBound
+ FOR j := 0 to UpperBound
+ IntRes1[i] := IntRes1[i] OR (BoolRes.word[i].bit[j] AND BoolRes.word[i].bit[j+1])
+ j += 2
+ ENDFOR
+ ENDFOR
+2: // equal each
+ IntRes1 := 0
+ FOR i := 0 to UpperBound
+ IntRes1[i] := BoolRes.word[i].bit[i]
+ ENDFOR
+3: // equal ordered
+ IntRes1 := (imm8[0] ? 0xFF : 0xFFFF)
+ FOR i := 0 to UpperBound
+ k := i
+ FOR j := 0 to UpperBound-i
+ IntRes1[i] := IntRes1[i] AND BoolRes.word[k].bit[j]
+ k := k+1
+ ENDFOR
+ ENDFOR
+ESAC
+// optionally negate results
+bInvalid := 0
+FOR i := 0 to UpperBound
+ IF imm8[4]
+ IF imm8[5] // only negate valid
+			n := i*size
+			IF b[n+size-1:n] == 0
+ bInvalid := 1
+ FI
+ IF bInvalid // invalid, don't negate
+ IntRes2[i] := IntRes1[i]
+ ELSE // valid, negate
+ IntRes2[i] := -1 XOR IntRes1[i]
+ FI
+ ELSE // negate all
+ IntRes2[i] := -1 XOR IntRes1[i]
+ FI
+ ELSE // don't negate
+ IntRes2[i] := IntRes1[i]
+ FI
+ENDFOR
+// output
+IF imm8[6] // byte / word mask
+ FOR i := 0 to UpperBound
+ j := i*size
+ IF IntRes2[i]
+ dst[j+size-1:j] := (imm8[0] ? 0xFF : 0xFFFF)
+ ELSE
+ dst[j+size-1:j] := 0
+ FI
+ ENDFOR
+ELSE // bit mask
+ dst[UpperBound:0] := IntRes2[UpperBound:0]
+ dst[127:UpperBound+1] := 0
+FI
+ </operation>
+ <instruction name="PCMPISTRM" form="xmm, xmm, imm8" xed="PCMPISTRM_XMMdq_XMMdq_IMMb"/>
+ <header>nmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE4.2" vexEq="TRUE" name="_mm_cmpistri">
+ <type>Flag</type>
+ <CPUID>SSE4.2</CPUID>
+ <category>String Compare</category>
+ <return type="int" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="a" etype="M128"/>
+ <parameter type="__m128i" varname="b" etype="M128"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Compare packed strings with implicit lengths in "a" and "b" using the control in "imm8", and store the generated index in "dst".
+ [strcmp_note]</description>
+ <operation>
+size := (imm8[0] ? 16 : 8) // 8 or 16-bit characters
+UpperBound := (128 / size) - 1
+BoolRes := 0
+// compare all characters
+aInvalid := 0
+bInvalid := 0
+FOR i := 0 to UpperBound
+ m := i*size
+ FOR j := 0 to UpperBound
+ n := j*size
+ BoolRes.word[i].bit[j] := (a[m+size-1:m] == b[n+size-1:n]) ? 1 : 0
+
+ // invalidate characters after EOS
+ IF a[m+size-1:m] == 0
+ aInvalid := 1
+ FI
+ IF b[n+size-1:n] == 0
+ bInvalid := 1
+ FI
+
+ // override comparisons for invalid characters
+ CASE (imm8[3:2]) OF
+ 0: // equal any
+ IF (!aInvalid &amp;&amp; bInvalid)
+ BoolRes.word[i].bit[j] := 0
+ ELSE IF (aInvalid &amp;&amp; !bInvalid)
+ BoolRes.word[i].bit[j] := 0
+ ELSE IF (aInvalid &amp;&amp; bInvalid)
+ BoolRes.word[i].bit[j] := 0
+ FI
+ 1: // ranges
+ IF (!aInvalid &amp;&amp; bInvalid)
+ BoolRes.word[i].bit[j] := 0
+ ELSE IF (aInvalid &amp;&amp; !bInvalid)
+ BoolRes.word[i].bit[j] := 0
+ ELSE IF (aInvalid &amp;&amp; bInvalid)
+ BoolRes.word[i].bit[j] := 0
+ FI
+ 2: // equal each
+ IF (!aInvalid &amp;&amp; bInvalid)
+ BoolRes.word[i].bit[j] := 0
+ ELSE IF (aInvalid &amp;&amp; !bInvalid)
+ BoolRes.word[i].bit[j] := 0
+ ELSE IF (aInvalid &amp;&amp; bInvalid)
+ BoolRes.word[i].bit[j] := 1
+ FI
+ 3: // equal ordered
+ IF (!aInvalid &amp;&amp; bInvalid)
+ BoolRes.word[i].bit[j] := 0
+ ELSE IF (aInvalid &amp;&amp; !bInvalid)
+ BoolRes.word[i].bit[j] := 1
+ ELSE IF (aInvalid &amp;&amp; bInvalid)
+ BoolRes.word[i].bit[j] := 1
+ FI
+ ESAC
+ ENDFOR
+ENDFOR
+// aggregate results
+CASE (imm8[3:2]) OF
+0: // equal any
+ IntRes1 := 0
+ FOR i := 0 to UpperBound
+ FOR j := 0 to UpperBound
+ IntRes1[i] := IntRes1[i] OR BoolRes.word[i].bit[j]
+ ENDFOR
+ ENDFOR
+1: // ranges
+ IntRes1 := 0
+ FOR i := 0 to UpperBound
+ FOR j := 0 to UpperBound
+ IntRes1[i] := IntRes1[i] OR (BoolRes.word[i].bit[j] AND BoolRes.word[i].bit[j+1])
+ j += 2
+ ENDFOR
+ ENDFOR
+2: // equal each
+ IntRes1 := 0
+ FOR i := 0 to UpperBound
+ IntRes1[i] := BoolRes.word[i].bit[i]
+ ENDFOR
+3: // equal ordered
+ IntRes1 := (imm8[0] ? 0xFF : 0xFFFF)
+ FOR i := 0 to UpperBound
+ k := i
+ FOR j := 0 to UpperBound-i
+ IntRes1[i] := IntRes1[i] AND BoolRes.word[k].bit[j]
+ k := k+1
+ ENDFOR
+ ENDFOR
+ESAC
+// optionally negate results
+bInvalid := 0
+FOR i := 0 to UpperBound
+ IF imm8[4]
+ IF imm8[5] // only negate valid
+			n := i*size
+			IF b[n+size-1:n] == 0
+ bInvalid := 1
+ FI
+ IF bInvalid // invalid, don't negate
+ IntRes2[i] := IntRes1[i]
+ ELSE // valid, negate
+ IntRes2[i] := -1 XOR IntRes1[i]
+ FI
+ ELSE // negate all
+ IntRes2[i] := -1 XOR IntRes1[i]
+ FI
+ ELSE // don't negate
+ IntRes2[i] := IntRes1[i]
+ FI
+ENDFOR
+// output
+IF imm8[6] // most significant bit
+ tmp := UpperBound
+ dst := tmp
+	DO WHILE ((tmp &gt;= 0) AND IntRes2[tmp] == 0)
+ tmp := tmp - 1
+ dst := tmp
+ OD
+ELSE // least significant bit
+ tmp := 0
+ dst := tmp
+	DO WHILE ((tmp &lt;= UpperBound) AND IntRes2[tmp] == 0)
+ tmp := tmp + 1
+ dst := tmp
+ OD
+FI
+ </operation>
+ <instruction name="PCMPISTRI" form="xmm, xmm, imm8" xed="PCMPISTRI_XMMdq_XMMdq_IMMb"/>
+ <header>nmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE4.2" vexEq="TRUE" name="_mm_cmpistrz">
+ <type>Flag</type>
+ <CPUID>SSE4.2</CPUID>
+ <category>String Compare</category>
+ <return type="int" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="a" etype="M128" hint="TRUE"/>
+ <parameter type="__m128i" varname="b" etype="M128"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Compare packed strings with implicit lengths in "a" and "b" using the control in "imm8", and returns 1 if any character in "b" was null, and 0 otherwise.
+ [strcmp_note]</description>
+ <operation>
+size := (imm8[0] ? 16 : 8) // 8 or 16-bit characters
+UpperBound := (128 / size) - 1
+bInvalid := 0
+FOR j := 0 to UpperBound
+ n := j*size
+ IF b[n+size-1:n] == 0
+ bInvalid := 1
+ FI
+ENDFOR
+dst := bInvalid
+ </operation>
+ <instruction name="PCMPISTRI" form="xmm, xmm, imm8" xed="PCMPISTRI_XMMdq_XMMdq_IMMb"/>
+ <header>nmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE4.2" vexEq="TRUE" name="_mm_cmpistrc">
+ <type>Flag</type>
+ <CPUID>SSE4.2</CPUID>
+ <category>String Compare</category>
+ <return type="int" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="a" etype="M128"/>
+ <parameter type="__m128i" varname="b" etype="M128"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Compare packed strings with implicit lengths in "a" and "b" using the control in "imm8", and returns 1 if the resulting mask was non-zero, and 0 otherwise.
+ [strcmp_note]</description>
+ <operation>
+size := (imm8[0] ? 16 : 8) // 8 or 16-bit characters
+UpperBound := (128 / size) - 1
+BoolRes := 0
+// compare all characters
+aInvalid := 0
+bInvalid := 0
+FOR i := 0 to UpperBound
+ m := i*size
+ FOR j := 0 to UpperBound
+ n := j*size
+ BoolRes.word[i].bit[j] := (a[m+size-1:m] == b[n+size-1:n]) ? 1 : 0
+
+ // invalidate characters after EOS
+ IF a[m+size-1:m] == 0
+ aInvalid := 1
+ FI
+ IF b[n+size-1:n] == 0
+ bInvalid := 1
+ FI
+
+ // override comparisons for invalid characters
+ CASE (imm8[3:2]) OF
+ 0: // equal any
+ IF (!aInvalid &amp;&amp; bInvalid)
+ BoolRes.word[i].bit[j] := 0
+ ELSE IF (aInvalid &amp;&amp; !bInvalid)
+ BoolRes.word[i].bit[j] := 0
+ ELSE IF (aInvalid &amp;&amp; bInvalid)
+ BoolRes.word[i].bit[j] := 0
+ FI
+ 1: // ranges
+ IF (!aInvalid &amp;&amp; bInvalid)
+ BoolRes.word[i].bit[j] := 0
+ ELSE IF (aInvalid &amp;&amp; !bInvalid)
+ BoolRes.word[i].bit[j] := 0
+ ELSE IF (aInvalid &amp;&amp; bInvalid)
+ BoolRes.word[i].bit[j] := 0
+ FI
+ 2: // equal each
+ IF (!aInvalid &amp;&amp; bInvalid)
+ BoolRes.word[i].bit[j] := 0
+ ELSE IF (aInvalid &amp;&amp; !bInvalid)
+ BoolRes.word[i].bit[j] := 0
+ ELSE IF (aInvalid &amp;&amp; bInvalid)
+ BoolRes.word[i].bit[j] := 1
+ FI
+ 3: // equal ordered
+ IF (!aInvalid &amp;&amp; bInvalid)
+ BoolRes.word[i].bit[j] := 0
+ ELSE IF (aInvalid &amp;&amp; !bInvalid)
+ BoolRes.word[i].bit[j] := 1
+ ELSE IF (aInvalid &amp;&amp; bInvalid)
+ BoolRes.word[i].bit[j] := 1
+ FI
+ ESAC
+ ENDFOR
+ENDFOR
+// aggregate results
+CASE (imm8[3:2]) OF
+0: // equal any
+ IntRes1 := 0
+ FOR i := 0 to UpperBound
+ FOR j := 0 to UpperBound
+ IntRes1[i] := IntRes1[i] OR BoolRes.word[i].bit[j]
+ ENDFOR
+ ENDFOR
+1: // ranges
+ IntRes1 := 0
+ FOR i := 0 to UpperBound
+ FOR j := 0 to UpperBound
+ IntRes1[i] := IntRes1[i] OR (BoolRes.word[i].bit[j] AND BoolRes.word[i].bit[j+1])
+ j += 2
+ ENDFOR
+ ENDFOR
+2: // equal each
+ IntRes1 := 0
+ FOR i := 0 to UpperBound
+ IntRes1[i] := BoolRes.word[i].bit[i]
+ ENDFOR
+3: // equal ordered
+ IntRes1 := (imm8[0] ? 0xFF : 0xFFFF)
+ FOR i := 0 to UpperBound
+ k := i
+ FOR j := 0 to UpperBound-i
+ IntRes1[i] := IntRes1[i] AND BoolRes.word[k].bit[j]
+ k := k+1
+ ENDFOR
+ ENDFOR
+ESAC
+// optionally negate results
+bInvalid := 0
+FOR i := 0 to UpperBound
+ IF imm8[4]
+ IF imm8[5] // only negate valid
+ IF b[i*size+size-1:i*size] == 0
+ bInvalid := 1
+ FI
+ IF bInvalid // invalid, don't negate
+ IntRes2[i] := IntRes1[i]
+ ELSE // valid, negate
+ IntRes2[i] := -1 XOR IntRes1[i]
+ FI
+ ELSE // negate all
+ IntRes2[i] := -1 XOR IntRes1[i]
+ FI
+ ELSE // don't negate
+ IntRes2[i] := IntRes1[i]
+ FI
+ENDFOR
+// output
+dst := (IntRes2 != 0)
+ </operation>
+ <instruction name="PCMPISTRI" form="xmm, xmm, imm8" xed="PCMPISTRI_XMMdq_XMMdq_IMMb"/>
+ <header>nmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE4.2" vexEq="TRUE" name="_mm_cmpistrs">
+ <type>Flag</type>
+ <CPUID>SSE4.2</CPUID>
+ <category>String Compare</category>
+ <return type="int" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="a" etype="M128"/>
+ <parameter type="__m128i" varname="b" etype="M128" hint="TRUE"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Compare packed strings with implicit lengths in "a" and "b" using the control in "imm8", and returns 1 if any character in "a" was null, and 0 otherwise.
+ [strcmp_note]</description>
+ <operation>
+size := (imm8[0] ? 16 : 8) // 8 or 16-bit characters
+UpperBound := (128 / size) - 1
+aInvalid := 0
+FOR i := 0 to UpperBound
+ m := i*size
+ IF a[m+size-1:m] == 0
+ aInvalid := 1
+ FI
+ENDFOR
+dst := aInvalid
+ </operation>
+ <instruction name="PCMPISTRI" form="xmm, xmm, imm8" xed="PCMPISTRI_XMMdq_XMMdq_IMMb"/>
+ <header>nmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE4.2" vexEq="TRUE" name="_mm_cmpistro">
+ <type>Flag</type>
+ <CPUID>SSE4.2</CPUID>
+ <category>String Compare</category>
+ <return type="int" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="a" etype="M128"/>
+ <parameter type="__m128i" varname="b" etype="M128"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Compare packed strings with implicit lengths in "a" and "b" using the control in "imm8", and returns bit 0 of the resulting bit mask.
+ [strcmp_note]</description>
+ <operation>
+size := (imm8[0] ? 16 : 8) // 8 or 16-bit characters
+UpperBound := (128 / size) - 1
+BoolRes := 0
+// compare all characters
+aInvalid := 0
+bInvalid := 0
+FOR i := 0 to UpperBound
+ m := i*size
+ FOR j := 0 to UpperBound
+ n := j*size
+ BoolRes.word[i].bit[j] := (a[m+size-1:m] == b[n+size-1:n]) ? 1 : 0
+
+ // invalidate characters after EOS
+ IF a[m+size-1:m] == 0
+ aInvalid := 1
+ FI
+ IF b[n+size-1:n] == 0
+ bInvalid := 1
+ FI
+
+ // override comparisons for invalid characters
+ CASE (imm8[3:2]) OF
+ 0: // equal any
+ IF (!aInvalid &amp;&amp; bInvalid)
+ BoolRes.word[i].bit[j] := 0
+ ELSE IF (aInvalid &amp;&amp; !bInvalid)
+ BoolRes.word[i].bit[j] := 0
+ ELSE IF (aInvalid &amp;&amp; bInvalid)
+ BoolRes.word[i].bit[j] := 0
+ FI
+ 1: // ranges
+ IF (!aInvalid &amp;&amp; bInvalid)
+ BoolRes.word[i].bit[j] := 0
+ ELSE IF (aInvalid &amp;&amp; !bInvalid)
+ BoolRes.word[i].bit[j] := 0
+ ELSE IF (aInvalid &amp;&amp; bInvalid)
+ BoolRes.word[i].bit[j] := 0
+ FI
+ 2: // equal each
+ IF (!aInvalid &amp;&amp; bInvalid)
+ BoolRes.word[i].bit[j] := 0
+ ELSE IF (aInvalid &amp;&amp; !bInvalid)
+ BoolRes.word[i].bit[j] := 0
+ ELSE IF (aInvalid &amp;&amp; bInvalid)
+ BoolRes.word[i].bit[j] := 1
+ FI
+ 3: // equal ordered
+ IF (!aInvalid &amp;&amp; bInvalid)
+ BoolRes.word[i].bit[j] := 0
+ ELSE IF (aInvalid &amp;&amp; !bInvalid)
+ BoolRes.word[i].bit[j] := 1
+ ELSE IF (aInvalid &amp;&amp; bInvalid)
+ BoolRes.word[i].bit[j] := 1
+ FI
+ ESAC
+ ENDFOR
+ENDFOR
+// aggregate results
+CASE (imm8[3:2]) OF
+0: // equal any
+ IntRes1 := 0
+ FOR i := 0 to UpperBound
+ FOR j := 0 to UpperBound
+ IntRes1[i] := IntRes1[i] OR BoolRes.word[i].bit[j]
+ ENDFOR
+ ENDFOR
+1: // ranges
+ IntRes1 := 0
+ FOR i := 0 to UpperBound
+ FOR j := 0 to UpperBound
+ IntRes1[i] := IntRes1[i] OR (BoolRes.word[i].bit[j] AND BoolRes.word[i].bit[j+1])
+ j += 2
+ ENDFOR
+ ENDFOR
+2: // equal each
+ IntRes1 := 0
+ FOR i := 0 to UpperBound
+ IntRes1[i] := BoolRes.word[i].bit[i]
+ ENDFOR
+3: // equal ordered
+ IntRes1 := (imm8[0] ? 0xFF : 0xFFFF)
+ FOR i := 0 to UpperBound
+ k := i
+ FOR j := 0 to UpperBound-i
+ IntRes1[i] := IntRes1[i] AND BoolRes.word[k].bit[j]
+ k := k+1
+ ENDFOR
+ ENDFOR
+ESAC
+// optionally negate results
+bInvalid := 0
+FOR i := 0 to UpperBound
+ IF imm8[4]
+ IF imm8[5] // only negate valid
+ IF b[i*size+size-1:i*size] == 0
+ bInvalid := 1
+ FI
+ IF bInvalid // invalid, don't negate
+ IntRes2[i] := IntRes1[i]
+ ELSE // valid, negate
+ IntRes2[i] := -1 XOR IntRes1[i]
+ FI
+ ELSE // negate all
+ IntRes2[i] := -1 XOR IntRes1[i]
+ FI
+ ELSE // don't negate
+ IntRes2[i] := IntRes1[i]
+ FI
+ENDFOR
+// output
+dst := IntRes2[0]
+ </operation>
+ <instruction name="PCMPISTRI" form="xmm, xmm, imm8" xed="PCMPISTRI_XMMdq_XMMdq_IMMb"/>
+ <header>nmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE4.2" vexEq="TRUE" name="_mm_cmpistra">
+ <type>Flag</type>
+ <CPUID>SSE4.2</CPUID>
+ <category>String Compare</category>
+ <return type="int" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="a" etype="M128"/>
+ <parameter type="__m128i" varname="b" etype="M128"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Compare packed strings with implicit lengths in "a" and "b" using the control in "imm8", and returns 1 if "b" did not contain a null character and the resulting mask was zero, and 0 otherwise.
+ [strcmp_note]</description>
+ <operation>
+size := (imm8[0] ? 16 : 8) // 8 or 16-bit characters
+UpperBound := (128 / size) - 1
+BoolRes := 0
+// compare all characters
+aInvalid := 0
+bInvalid := 0
+FOR i := 0 to UpperBound
+ m := i*size
+ FOR j := 0 to UpperBound
+ n := j*size
+ BoolRes.word[i].bit[j] := (a[m+size-1:m] == b[n+size-1:n]) ? 1 : 0
+
+ // invalidate characters after EOS
+ IF a[m+size-1:m] == 0
+ aInvalid := 1
+ FI
+ IF b[n+size-1:n] == 0
+ bInvalid := 1
+ FI
+
+ // override comparisons for invalid characters
+ CASE (imm8[3:2]) OF
+ 0: // equal any
+ IF (!aInvalid &amp;&amp; bInvalid)
+ BoolRes.word[i].bit[j] := 0
+ ELSE IF (aInvalid &amp;&amp; !bInvalid)
+ BoolRes.word[i].bit[j] := 0
+ ELSE IF (aInvalid &amp;&amp; bInvalid)
+ BoolRes.word[i].bit[j] := 0
+ FI
+ 1: // ranges
+ IF (!aInvalid &amp;&amp; bInvalid)
+ BoolRes.word[i].bit[j] := 0
+ ELSE IF (aInvalid &amp;&amp; !bInvalid)
+ BoolRes.word[i].bit[j] := 0
+ ELSE IF (aInvalid &amp;&amp; bInvalid)
+ BoolRes.word[i].bit[j] := 0
+ FI
+ 2: // equal each
+ IF (!aInvalid &amp;&amp; bInvalid)
+ BoolRes.word[i].bit[j] := 0
+ ELSE IF (aInvalid &amp;&amp; !bInvalid)
+ BoolRes.word[i].bit[j] := 0
+ ELSE IF (aInvalid &amp;&amp; bInvalid)
+ BoolRes.word[i].bit[j] := 1
+ FI
+ 3: // equal ordered
+ IF (!aInvalid &amp;&amp; bInvalid)
+ BoolRes.word[i].bit[j] := 0
+ ELSE IF (aInvalid &amp;&amp; !bInvalid)
+ BoolRes.word[i].bit[j] := 1
+ ELSE IF (aInvalid &amp;&amp; bInvalid)
+ BoolRes.word[i].bit[j] := 1
+ FI
+ ESAC
+ ENDFOR
+ENDFOR
+// aggregate results
+CASE (imm8[3:2]) OF
+0: // equal any
+ IntRes1 := 0
+ FOR i := 0 to UpperBound
+ FOR j := 0 to UpperBound
+ IntRes1[i] := IntRes1[i] OR BoolRes.word[i].bit[j]
+ ENDFOR
+ ENDFOR
+1: // ranges
+ IntRes1 := 0
+ FOR i := 0 to UpperBound
+ FOR j := 0 to UpperBound
+ IntRes1[i] := IntRes1[i] OR (BoolRes.word[i].bit[j] AND BoolRes.word[i].bit[j+1])
+ j += 2
+ ENDFOR
+ ENDFOR
+2: // equal each
+ IntRes1 := 0
+ FOR i := 0 to UpperBound
+ IntRes1[i] := BoolRes.word[i].bit[i]
+ ENDFOR
+3: // equal ordered
+ IntRes1 := (imm8[0] ? 0xFF : 0xFFFF)
+ FOR i := 0 to UpperBound
+ k := i
+ FOR j := 0 to UpperBound-i
+ IntRes1[i] := IntRes1[i] AND BoolRes.word[k].bit[j]
+ k := k+1
+ ENDFOR
+ ENDFOR
+ESAC
+// optionally negate results
+bInvalid := 0
+FOR i := 0 to UpperBound
+ IF imm8[4]
+ IF imm8[5] // only negate valid
+ IF b[i*size+size-1:i*size] == 0
+ bInvalid := 1
+ FI
+ IF bInvalid // invalid, don't negate
+ IntRes2[i] := IntRes1[i]
+ ELSE // valid, negate
+ IntRes2[i] := -1 XOR IntRes1[i]
+ FI
+ ELSE // negate all
+ IntRes2[i] := -1 XOR IntRes1[i]
+ FI
+ ELSE // don't negate
+ IntRes2[i] := IntRes1[i]
+ FI
+ENDFOR
+// output
+dst := (IntRes2 == 0) AND bInvalid
+ </operation>
+ <instruction name="PCMPISTRI" form="xmm, xmm, imm8" xed="PCMPISTRI_XMMdq_XMMdq_IMMb"/>
+ <header>nmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE4.2" vexEq="TRUE" name="_mm_cmpestrm">
+ <CPUID>SSE4.2</CPUID>
+ <category>String Compare</category>
+ <return type="__m128i" varname="dst" etype="M128"/>
+ <parameter type="__m128i" varname="a" etype="M128"/>
+ <parameter type="int" varname="la" etype="UI32"/>
+ <parameter type="__m128i" varname="b" etype="M128"/>
+ <parameter type="int" varname="lb" etype="UI32"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Compare packed strings in "a" and "b" with lengths "la" and "lb" using the control in "imm8", and store the generated mask in "dst".
+ [strcmp_note]</description>
+ <operation>
+size := (imm8[0] ? 16 : 8) // 8 or 16-bit characters
+UpperBound := (128 / size) - 1
+BoolRes := 0
+// compare all characters
+aInvalid := 0
+bInvalid := 0
+FOR i := 0 to UpperBound
+ m := i*size
+ FOR j := 0 to UpperBound
+ n := j*size
+ BoolRes.word[i].bit[j] := (a[m+size-1:m] == b[n+size-1:n]) ? 1 : 0
+
+ // invalidate characters after EOS
+ IF i == la
+ aInvalid := 1
+ FI
+ IF j == lb
+ bInvalid := 1
+ FI
+
+ // override comparisons for invalid characters
+ CASE (imm8[3:2]) OF
+ 0: // equal any
+ IF (!aInvalid &amp;&amp; bInvalid)
+ BoolRes.word[i].bit[j] := 0
+ ELSE IF (aInvalid &amp;&amp; !bInvalid)
+ BoolRes.word[i].bit[j] := 0
+ ELSE IF (aInvalid &amp;&amp; bInvalid)
+ BoolRes.word[i].bit[j] := 0
+ FI
+ 1: // ranges
+ IF (!aInvalid &amp;&amp; bInvalid)
+ BoolRes.word[i].bit[j] := 0
+ ELSE IF (aInvalid &amp;&amp; !bInvalid)
+ BoolRes.word[i].bit[j] := 0
+ ELSE IF (aInvalid &amp;&amp; bInvalid)
+ BoolRes.word[i].bit[j] := 0
+ FI
+ 2: // equal each
+ IF (!aInvalid &amp;&amp; bInvalid)
+ BoolRes.word[i].bit[j] := 0
+ ELSE IF (aInvalid &amp;&amp; !bInvalid)
+ BoolRes.word[i].bit[j] := 0
+ ELSE IF (aInvalid &amp;&amp; bInvalid)
+ BoolRes.word[i].bit[j] := 1
+ FI
+ 3: // equal ordered
+ IF (!aInvalid &amp;&amp; bInvalid)
+ BoolRes.word[i].bit[j] := 0
+ ELSE IF (aInvalid &amp;&amp; !bInvalid)
+ BoolRes.word[i].bit[j] := 1
+ ELSE IF (aInvalid &amp;&amp; bInvalid)
+ BoolRes.word[i].bit[j] := 1
+ FI
+ ESAC
+ ENDFOR
+ENDFOR
+// aggregate results
+CASE (imm8[3:2]) OF
+0: // equal any
+ IntRes1 := 0
+ FOR i := 0 to UpperBound
+ FOR j := 0 to UpperBound
+ IntRes1[i] := IntRes1[i] OR BoolRes.word[i].bit[j]
+ ENDFOR
+ ENDFOR
+1: // ranges
+ IntRes1 := 0
+ FOR i := 0 to UpperBound
+ FOR j := 0 to UpperBound
+ IntRes1[i] := IntRes1[i] OR (BoolRes.word[i].bit[j] AND BoolRes.word[i].bit[j+1])
+ j += 2
+ ENDFOR
+ ENDFOR
+2: // equal each
+ IntRes1 := 0
+ FOR i := 0 to UpperBound
+ IntRes1[i] := BoolRes.word[i].bit[i]
+ ENDFOR
+3: // equal ordered
+ IntRes1 := (imm8[0] ? 0xFF : 0xFFFF)
+ FOR i := 0 to UpperBound
+ k := i
+ FOR j := 0 to UpperBound-i
+ IntRes1[i] := IntRes1[i] AND BoolRes.word[k].bit[j]
+ k := k+1
+ ENDFOR
+ ENDFOR
+ESAC
+// optionally negate results
+FOR i := 0 to UpperBound
+ IF imm8[4]
+ IF imm8[5] // only negate valid
+ IF i &gt;= lb // invalid, don't negate
+ IntRes2[i] := IntRes1[i]
+ ELSE // valid, negate
+ IntRes2[i] := -1 XOR IntRes1[i]
+ FI
+ ELSE // negate all
+ IntRes2[i] := -1 XOR IntRes1[i]
+ FI
+ ELSE // don't negate
+ IntRes2[i] := IntRes1[i]
+ FI
+ENDFOR
+// output
+IF imm8[6] // byte / word mask
+ FOR i := 0 to UpperBound
+ j := i*size
+ IF IntRes2[i]
+ dst[j+size-1:j] := (imm8[0] ? 0xFF : 0xFFFF)
+ ELSE
+ dst[j+size-1:j] := 0
+ FI
+ ENDFOR
+ELSE // bit mask
+ dst[UpperBound:0] := IntRes2[UpperBound:0]
+ dst[127:UpperBound+1] := 0
+FI
+ </operation>
+ <instruction name="PCMPESTRM" form="xmm, xmm, imm8" xed="PCMPESTRM_XMMdq_XMMdq_IMMb"/>
+ <header>nmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE4.2" vexEq="TRUE" name="_mm_cmpestri">
+ <type>Flag</type>
+ <CPUID>SSE4.2</CPUID>
+ <category>String Compare</category>
+ <return type="int" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="a" etype="M128"/>
+ <parameter type="int" varname="la" etype="UI32"/>
+ <parameter type="__m128i" varname="b" etype="M128"/>
+ <parameter type="int" varname="lb" etype="UI32"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Compare packed strings in "a" and "b" with lengths "la" and "lb" using the control in "imm8", and store the generated index in "dst".
+ [strcmp_note]</description>
+ <operation>
+size := (imm8[0] ? 16 : 8) // 8 or 16-bit characters
+UpperBound := (128 / size) - 1
+BoolRes := 0
+// compare all characters
+aInvalid := 0
+bInvalid := 0
+FOR i := 0 to UpperBound
+ m := i*size
+ FOR j := 0 to UpperBound
+ n := j*size
+ BoolRes.word[i].bit[j] := (a[m+size-1:m] == b[n+size-1:n]) ? 1 : 0
+
+ // invalidate characters after EOS
+ IF i == la
+ aInvalid := 1
+ FI
+ IF j == lb
+ bInvalid := 1
+ FI
+
+ // override comparisons for invalid characters
+ CASE (imm8[3:2]) OF
+ 0: // equal any
+ IF (!aInvalid &amp;&amp; bInvalid)
+ BoolRes.word[i].bit[j] := 0
+ ELSE IF (aInvalid &amp;&amp; !bInvalid)
+ BoolRes.word[i].bit[j] := 0
+ ELSE IF (aInvalid &amp;&amp; bInvalid)
+ BoolRes.word[i].bit[j] := 0
+ FI
+ 1: // ranges
+ IF (!aInvalid &amp;&amp; bInvalid)
+ BoolRes.word[i].bit[j] := 0
+ ELSE IF (aInvalid &amp;&amp; !bInvalid)
+ BoolRes.word[i].bit[j] := 0
+ ELSE IF (aInvalid &amp;&amp; bInvalid)
+ BoolRes.word[i].bit[j] := 0
+ FI
+ 2: // equal each
+ IF (!aInvalid &amp;&amp; bInvalid)
+ BoolRes.word[i].bit[j] := 0
+ ELSE IF (aInvalid &amp;&amp; !bInvalid)
+ BoolRes.word[i].bit[j] := 0
+ ELSE IF (aInvalid &amp;&amp; bInvalid)
+ BoolRes.word[i].bit[j] := 1
+ FI
+ 3: // equal ordered
+ IF (!aInvalid &amp;&amp; bInvalid)
+ BoolRes.word[i].bit[j] := 0
+ ELSE IF (aInvalid &amp;&amp; !bInvalid)
+ BoolRes.word[i].bit[j] := 1
+ ELSE IF (aInvalid &amp;&amp; bInvalid)
+ BoolRes.word[i].bit[j] := 1
+ FI
+ ESAC
+ ENDFOR
+ENDFOR
+// aggregate results
+CASE (imm8[3:2]) OF
+0: // equal any
+ IntRes1 := 0
+ FOR i := 0 to UpperBound
+ FOR j := 0 to UpperBound
+ IntRes1[i] := IntRes1[i] OR BoolRes.word[i].bit[j]
+ ENDFOR
+ ENDFOR
+1: // ranges
+ IntRes1 := 0
+ FOR i := 0 to UpperBound
+ FOR j := 0 to UpperBound
+ IntRes1[i] := IntRes1[i] OR (BoolRes.word[i].bit[j] AND BoolRes.word[i].bit[j+1])
+ j += 2
+ ENDFOR
+ ENDFOR
+2: // equal each
+ IntRes1 := 0
+ FOR i := 0 to UpperBound
+ IntRes1[i] := BoolRes.word[i].bit[i]
+ ENDFOR
+3: // equal ordered
+ IntRes1 := (imm8[0] ? 0xFF : 0xFFFF)
+ FOR i := 0 to UpperBound
+ k := i
+ FOR j := 0 to UpperBound-i
+ IntRes1[i] := IntRes1[i] AND BoolRes.word[k].bit[j]
+ k := k+1
+ ENDFOR
+ ENDFOR
+ESAC
+// optionally negate results
+FOR i := 0 to UpperBound
+ IF imm8[4]
+ IF imm8[5] // only negate valid
+ IF i &gt;= lb // invalid, don't negate
+ IntRes2[i] := IntRes1[i]
+ ELSE // valid, negate
+ IntRes2[i] := -1 XOR IntRes1[i]
+ FI
+ ELSE // negate all
+ IntRes2[i] := -1 XOR IntRes1[i]
+ FI
+ ELSE // don't negate
+ IntRes2[i] := IntRes1[i]
+ FI
+ENDFOR
+// output
+IF imm8[6] // most significant bit
+ tmp := UpperBound
+ dst := tmp
+ DO WHILE ((tmp &gt;= 0) AND IntRes2[tmp] == 0)
+ tmp := tmp - 1
+ dst := tmp
+ OD
+ELSE // least significant bit
+ tmp := 0
+ dst := tmp
+ DO WHILE ((tmp &lt;= UpperBound) AND IntRes2[tmp] == 0)
+ tmp := tmp + 1
+ dst := tmp
+ OD
+FI
+ </operation>
+ <instruction name="PCMPESTRI" form="xmm, xmm, imm8" xed="PCMPESTRI_XMMdq_XMMdq_IMMb"/>
+ <header>nmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE4.2" vexEq="TRUE" name="_mm_cmpestrz">
+ <type>Flag</type>
+ <CPUID>SSE4.2</CPUID>
+ <category>String Compare</category>
+ <return type="int" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="a" etype="M128" hint="TRUE"/>
+ <parameter type="int" varname="la" etype="UI32" hint="TRUE"/>
+ <parameter type="__m128i" varname="b" etype="M128" hint="TRUE"/>
+ <parameter type="int" varname="lb" etype="UI32"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Compare packed strings in "a" and "b" with lengths "la" and "lb" using the control in "imm8", and returns 1 if any character in "b" was null, and 0 otherwise.
+ [strcmp_note]</description>
+ <operation>
+size := (imm8[0] ? 16 : 8) // 8 or 16-bit characters
+UpperBound := (128 / size) - 1
+dst := (lb &lt;= UpperBound)
+ </operation>
+ <instruction name="PCMPESTRI" form="xmm, xmm, imm8" xed="PCMPESTRI_XMMdq_XMMdq_IMMb"/>
+ <header>nmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE4.2" vexEq="TRUE" name="_mm_cmpestrc">
+ <type>Flag</type>
+ <CPUID>SSE4.2</CPUID>
+ <category>String Compare</category>
+ <return type="int" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="a" etype="M128"/>
+ <parameter type="int" varname="la" etype="UI32"/>
+ <parameter type="__m128i" varname="b" etype="M128"/>
+ <parameter type="int" varname="lb" etype="UI32"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Compare packed strings in "a" and "b" with lengths "la" and "lb" using the control in "imm8", and returns 1 if the resulting mask was non-zero, and 0 otherwise.
+ [strcmp_note]</description>
+ <operation>
+size := (imm8[0] ? 16 : 8) // 8 or 16-bit characters
+UpperBound := (128 / size) - 1
+BoolRes := 0
+// compare all characters
+aInvalid := 0
+bInvalid := 0
+FOR i := 0 to UpperBound
+ m := i*size
+ FOR j := 0 to UpperBound
+ n := j*size
+ BoolRes.word[i].bit[j] := (a[m+size-1:m] == b[n+size-1:n]) ? 1 : 0
+
+ // invalidate characters after EOS
+ IF i == la
+ aInvalid := 1
+ FI
+ IF j == lb
+ bInvalid := 1
+ FI
+
+ // override comparisons for invalid characters
+ CASE (imm8[3:2]) OF
+ 0: // equal any
+ IF (!aInvalid &amp;&amp; bInvalid)
+ BoolRes.word[i].bit[j] := 0
+ ELSE IF (aInvalid &amp;&amp; !bInvalid)
+ BoolRes.word[i].bit[j] := 0
+ ELSE IF (aInvalid &amp;&amp; bInvalid)
+ BoolRes.word[i].bit[j] := 0
+ FI
+ 1: // ranges
+ IF (!aInvalid &amp;&amp; bInvalid)
+ BoolRes.word[i].bit[j] := 0
+ ELSE IF (aInvalid &amp;&amp; !bInvalid)
+ BoolRes.word[i].bit[j] := 0
+ ELSE IF (aInvalid &amp;&amp; bInvalid)
+ BoolRes.word[i].bit[j] := 0
+ FI
+ 2: // equal each
+ IF (!aInvalid &amp;&amp; bInvalid)
+ BoolRes.word[i].bit[j] := 0
+ ELSE IF (aInvalid &amp;&amp; !bInvalid)
+ BoolRes.word[i].bit[j] := 0
+ ELSE IF (aInvalid &amp;&amp; bInvalid)
+ BoolRes.word[i].bit[j] := 1
+ FI
+ 3: // equal ordered
+ IF (!aInvalid &amp;&amp; bInvalid)
+ BoolRes.word[i].bit[j] := 0
+ ELSE IF (aInvalid &amp;&amp; !bInvalid)
+ BoolRes.word[i].bit[j] := 1
+ ELSE IF (aInvalid &amp;&amp; bInvalid)
+ BoolRes.word[i].bit[j] := 1
+ FI
+ ESAC
+ ENDFOR
+ENDFOR
+// aggregate results
+CASE (imm8[3:2]) OF
+0: // equal any
+ IntRes1 := 0
+ FOR i := 0 to UpperBound
+ FOR j := 0 to UpperBound
+ IntRes1[i] := IntRes1[i] OR BoolRes.word[i].bit[j]
+ ENDFOR
+ ENDFOR
+1: // ranges
+ IntRes1 := 0
+ FOR i := 0 to UpperBound
+ FOR j := 0 to UpperBound
+ IntRes1[i] := IntRes1[i] OR (BoolRes.word[i].bit[j] AND BoolRes.word[i].bit[j+1])
+ j += 2
+ ENDFOR
+ ENDFOR
+2: // equal each
+ IntRes1 := 0
+ FOR i := 0 to UpperBound
+ IntRes1[i] := BoolRes.word[i].bit[i]
+ ENDFOR
+3: // equal ordered
+ IntRes1 := (imm8[0] ? 0xFF : 0xFFFF)
+ FOR i := 0 to UpperBound
+ k := i
+ FOR j := 0 to UpperBound-i
+ IntRes1[i] := IntRes1[i] AND BoolRes.word[k].bit[j]
+ k := k+1
+ ENDFOR
+ ENDFOR
+ESAC
+// optionally negate results
+FOR i := 0 to UpperBound
+ IF imm8[4]
+ IF imm8[5] // only negate valid
+ IF i &gt;= lb // invalid, don't negate
+ IntRes2[i] := IntRes1[i]
+ ELSE // valid, negate
+ IntRes2[i] := -1 XOR IntRes1[i]
+ FI
+ ELSE // negate all
+ IntRes2[i] := -1 XOR IntRes1[i]
+ FI
+ ELSE // don't negate
+ IntRes2[i] := IntRes1[i]
+ FI
+ENDFOR
+// output
+dst := (IntRes2 != 0)
+ </operation>
+ <instruction name="PCMPESTRI" form="xmm, xmm, imm8" xed="PCMPESTRI_XMMdq_XMMdq_IMMb"/>
+ <header>nmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE4.2" vexEq="TRUE" name="_mm_cmpestrs">
+ <type>Flag</type>
+ <CPUID>SSE4.2</CPUID>
+ <category>String Compare</category>
+ <return type="int" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="a" etype="M128" hint="TRUE"/>
+ <parameter type="int" varname="la" etype="UI32"/>
+ <parameter type="__m128i" varname="b" etype="M128" hint="TRUE"/>
+ <parameter type="int" varname="lb" etype="UI32" hint="TRUE"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Compare packed strings in "a" and "b" with lengths "la" and "lb" using the control in "imm8", and returns 1 if any character in "a" was null, and 0 otherwise.
+ [strcmp_note]</description>
+ <operation>
+size := (imm8[0] ? 16 : 8) // 8 or 16-bit characters
+UpperBound := (128 / size) - 1
+dst := (la &lt;= UpperBound)
+ </operation>
+ <instruction name="PCMPESTRI" form="xmm, xmm, imm8" xed="PCMPESTRI_XMMdq_XMMdq_IMMb"/>
+ <header>nmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE4.2" vexEq="TRUE" name="_mm_cmpestro">
+ <type>Flag</type>
+ <CPUID>SSE4.2</CPUID>
+ <category>String Compare</category>
+ <return type="int" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="a" etype="M128"/>
+ <parameter type="int" varname="la" etype="UI32"/>
+ <parameter type="__m128i" varname="b" etype="M128"/>
+ <parameter type="int" varname="lb" etype="UI32"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Compare packed strings in "a" and "b" with lengths "la" and "lb" using the control in "imm8", and returns bit 0 of the resulting bit mask.
+ [strcmp_note]</description>
+ <operation>
+size := (imm8[0] ? 16 : 8) // 8 or 16-bit characters
+UpperBound := (128 / size) - 1
+BoolRes := 0
+// compare all characters
+aInvalid := 0
+bInvalid := 0
+FOR i := 0 to UpperBound
+ m := i*size
+ FOR j := 0 to UpperBound
+ n := j*size
+ BoolRes.word[i].bit[j] := (a[m+size-1:m] == b[n+size-1:n]) ? 1 : 0
+
+ // invalidate characters after EOS
+ IF i == la
+ aInvalid := 1
+ FI
+ IF j == lb
+ bInvalid := 1
+ FI
+
+ // override comparisons for invalid characters
+ CASE (imm8[3:2]) OF
+ 0: // equal any
+ IF (!aInvalid &amp;&amp; bInvalid)
+ BoolRes.word[i].bit[j] := 0
+ ELSE IF (aInvalid &amp;&amp; !bInvalid)
+ BoolRes.word[i].bit[j] := 0
+ ELSE IF (aInvalid &amp;&amp; bInvalid)
+ BoolRes.word[i].bit[j] := 0
+ FI
+ 1: // ranges
+ IF (!aInvalid &amp;&amp; bInvalid)
+ BoolRes.word[i].bit[j] := 0
+ ELSE IF (aInvalid &amp;&amp; !bInvalid)
+ BoolRes.word[i].bit[j] := 0
+ ELSE IF (aInvalid &amp;&amp; bInvalid)
+ BoolRes.word[i].bit[j] := 0
+ FI
+ 2: // equal each
+ IF (!aInvalid &amp;&amp; bInvalid)
+ BoolRes.word[i].bit[j] := 0
+ ELSE IF (aInvalid &amp;&amp; !bInvalid)
+ BoolRes.word[i].bit[j] := 0
+ ELSE IF (aInvalid &amp;&amp; bInvalid)
+ BoolRes.word[i].bit[j] := 1
+ FI
+ 3: // equal ordered
+ IF (!aInvalid &amp;&amp; bInvalid)
+ BoolRes.word[i].bit[j] := 0
+ ELSE IF (aInvalid &amp;&amp; !bInvalid)
+ BoolRes.word[i].bit[j] := 1
+ ELSE IF (aInvalid &amp;&amp; bInvalid)
+ BoolRes.word[i].bit[j] := 1
+ FI
+ ESAC
+ ENDFOR
+ENDFOR
+// aggregate results
+CASE (imm8[3:2]) OF
+0: // equal any
+ IntRes1 := 0
+ FOR i := 0 to UpperBound
+ FOR j := 0 to UpperBound
+ IntRes1[i] := IntRes1[i] OR BoolRes.word[i].bit[j]
+ ENDFOR
+ ENDFOR
+1: // ranges
+ IntRes1 := 0
+ FOR i := 0 to UpperBound
+ FOR j := 0 to UpperBound
+ IntRes1[i] := IntRes1[i] OR (BoolRes.word[i].bit[j] AND BoolRes.word[i].bit[j+1])
+ j += 2
+ ENDFOR
+ ENDFOR
+2: // equal each
+ IntRes1 := 0
+ FOR i := 0 to UpperBound
+ IntRes1[i] := BoolRes.word[i].bit[i]
+ ENDFOR
+3: // equal ordered
+ IntRes1 := (imm8[0] ? 0xFF : 0xFFFF)
+ FOR i := 0 to UpperBound
+ k := i
+ FOR j := 0 to UpperBound-i
+ IntRes1[i] := IntRes1[i] AND BoolRes.word[k].bit[j]
+ k := k+1
+ ENDFOR
+ ENDFOR
+ESAC
+// optionally negate results
+FOR i := 0 to UpperBound
+ IF imm8[4]
+ IF imm8[5] // only negate valid
+ IF i &gt;= lb // invalid, don't negate
+ IntRes2[i] := IntRes1[i]
+ ELSE // valid, negate
+ IntRes2[i] := -1 XOR IntRes1[i]
+ FI
+ ELSE // negate all
+ IntRes2[i] := -1 XOR IntRes1[i]
+ FI
+ ELSE // don't negate
+ IntRes2[i] := IntRes1[i]
+ FI
+ENDFOR
+// output
+dst := IntRes2[0]
+ </operation>
+ <instruction name="PCMPESTRI" form="xmm, xmm, imm8" xed="PCMPESTRI_XMMdq_XMMdq_IMMb"/>
+ <header>nmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE4.2" vexEq="TRUE" name="_mm_cmpestra">
+ <type>Flag</type>
+ <CPUID>SSE4.2</CPUID>
+ <category>String Compare</category>
+ <return type="int" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="a" etype="M128"/>
+ <parameter type="int" varname="la" etype="UI32"/>
+ <parameter type="__m128i" varname="b" etype="M128"/>
+ <parameter type="int" varname="lb" etype="UI32"/>
+ <parameter type="const int" varname="imm8" etype="IMM" immwidth="8"/>
+ <description>Compare packed strings in "a" and "b" with lengths "la" and "lb" using the control in "imm8", and returns 1 if "b" did not contain a null character and the resulting mask was zero, and 0 otherwise.
+ [strcmp_note]</description>
+ <operation>
+size := (imm8[0] ? 16 : 8) // 8 or 16-bit characters
+UpperBound := (128 / size) - 1
+BoolRes := 0
+// compare all characters
+aInvalid := 0
+bInvalid := 0
+FOR i := 0 to UpperBound
+ m := i*size
+ FOR j := 0 to UpperBound
+ n := j*size
+ BoolRes.word[i].bit[j] := (a[m+size-1:m] == b[n+size-1:n]) ? 1 : 0
+
+ // invalidate characters after EOS
+ IF i == la
+ aInvalid := 1
+ FI
+ IF j == lb
+ bInvalid := 1
+ FI
+
+ // override comparisons for invalid characters
+ CASE (imm8[3:2]) OF
+ 0: // equal any
+ IF (!aInvalid &amp;&amp; bInvalid)
+ BoolRes.word[i].bit[j] := 0
+ ELSE IF (aInvalid &amp;&amp; !bInvalid)
+ BoolRes.word[i].bit[j] := 0
+ ELSE IF (aInvalid &amp;&amp; bInvalid)
+ BoolRes.word[i].bit[j] := 0
+ FI
+ 1: // ranges
+ IF (!aInvalid &amp;&amp; bInvalid)
+ BoolRes.word[i].bit[j] := 0
+ ELSE IF (aInvalid &amp;&amp; !bInvalid)
+ BoolRes.word[i].bit[j] := 0
+ ELSE IF (aInvalid &amp;&amp; bInvalid)
+ BoolRes.word[i].bit[j] := 0
+ FI
+ 2: // equal each
+ IF (!aInvalid &amp;&amp; bInvalid)
+ BoolRes.word[i].bit[j] := 0
+ ELSE IF (aInvalid &amp;&amp; !bInvalid)
+ BoolRes.word[i].bit[j] := 0
+ ELSE IF (aInvalid &amp;&amp; bInvalid)
+ BoolRes.word[i].bit[j] := 1
+ FI
+ 3: // equal ordered
+ IF (!aInvalid &amp;&amp; bInvalid)
+ BoolRes.word[i].bit[j] := 0
+ ELSE IF (aInvalid &amp;&amp; !bInvalid)
+ BoolRes.word[i].bit[j] := 1
+ ELSE IF (aInvalid &amp;&amp; bInvalid)
+ BoolRes.word[i].bit[j] := 1
+ FI
+ ESAC
+ ENDFOR
+ENDFOR
+// aggregate results
+CASE (imm8[3:2]) OF
+0: // equal any
+ IntRes1 := 0
+ FOR i := 0 to UpperBound
+ FOR j := 0 to UpperBound
+ IntRes1[i] := IntRes1[i] OR BoolRes.word[i].bit[j]
+ ENDFOR
+ ENDFOR
+1: // ranges
+ IntRes1 := 0
+ FOR i := 0 to UpperBound
+ FOR j := 0 to UpperBound
+ IntRes1[i] := IntRes1[i] OR (BoolRes.word[i].bit[j] AND BoolRes.word[i].bit[j+1])
+ j += 2
+ ENDFOR
+ ENDFOR
+2: // equal each
+ IntRes1 := 0
+ FOR i := 0 to UpperBound
+ IntRes1[i] := BoolRes.word[i].bit[i]
+ ENDFOR
+3: // equal ordered
+ IntRes1 := (imm8[0] ? 0xFF : 0xFFFF)
+ FOR i := 0 to UpperBound
+ k := i
+ FOR j := 0 to UpperBound-i
+ IntRes1[i] := IntRes1[i] AND BoolRes.word[k].bit[j]
+ k := k+1
+ ENDFOR
+ ENDFOR
+ESAC
+// optionally negate results
+FOR i := 0 to UpperBound
+ IF imm8[4]
+ IF imm8[5] // only negate valid
+ IF i &gt;= lb // invalid, don't negate
+ IntRes2[i] := IntRes1[i]
+ ELSE // valid, negate
+ IntRes2[i] := -1 XOR IntRes1[i]
+ FI
+ ELSE // negate all
+ IntRes2[i] := -1 XOR IntRes1[i]
+ FI
+ ELSE // don't negate
+ IntRes2[i] := IntRes1[i]
+ FI
+ENDFOR
+// output
+dst := (IntRes2 == 0) AND (lb &gt; UpperBound)
+ </operation>
+ <instruction name="PCMPESTRI" form="xmm, xmm, imm8" xed="PCMPESTRI_XMMdq_XMMdq_IMMb"/>
+ <header>nmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE4.2" vexEq="TRUE" name="_mm_cmpgt_epi64">
+ <type>Integer</type>
+ <CPUID>SSE4.2</CPUID>
+ <category>Compare</category>
+ <return type="__m128i" varname="dst" etype="UI64"/>
+ <parameter type="__m128i" varname="a" etype="SI64"/>
+ <parameter type="__m128i" varname="b" etype="SI64"/>
+ <description>Compare packed signed 64-bit integers in "a" and "b" for greater-than, and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*64
+ dst[i+63:i] := ( a[i+63:i] &gt; b[i+63:i] ) ? 0xFFFFFFFFFFFFFFFF : 0
+ENDFOR
+ </operation>
+ <instruction name="PCMPGTQ" form="xmm, xmm" xed="PCMPGTQ_XMMdq_XMMdq"/>
+ <header>nmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE4.2" name="_mm_crc32_u8">
+ <type>Integer</type>
+ <CPUID>SSE4.2</CPUID>
+ <category>Cryptography</category>
+ <return type="unsigned int" varname="dst" etype="UI32"/>
+ <parameter type="unsigned int" varname="crc" etype="UI32"/>
+ <parameter type="unsigned char" varname="v" etype="UI8"/>
+ <description>Starting with the initial value in "crc", accumulates a CRC32 value for unsigned 8-bit integer "v", and stores the result in "dst".</description>
+ <operation>tmp1[7:0] := v[0:7] // bit reflection
+tmp2[31:0] := crc[0:31] // bit reflection
+tmp3[39:0] := tmp1[7:0] &lt;&lt; 32
+tmp4[39:0] := tmp2[31:0] &lt;&lt; 8
+tmp5[39:0] := tmp3[39:0] XOR tmp4[39:0]
+tmp6[31:0] := MOD2(tmp5[39:0], 0x11EDC6F41) // remainder from polynomial division modulus 2
+dst[31:0] := tmp6[0:31] // bit reflection
+ </operation>
+ <instruction name="CRC32" form="r32, r8" xed="CRC32_GPRyy_GPR8b"/>
+ <header>nmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE4.2" name="_mm_crc32_u16">
+ <type>Integer</type>
+ <CPUID>SSE4.2</CPUID>
+ <category>Cryptography</category>
+ <return type="unsigned int" varname="dst" etype="UI32"/>
+ <parameter type="unsigned int" varname="crc" etype="UI32"/>
+ <parameter type="unsigned short" varname="v" etype="UI16"/>
+ <description>Starting with the initial value in "crc", accumulates a CRC32 value for unsigned 16-bit integer "v", and stores the result in "dst".</description>
+ <operation>tmp1[15:0] := v[0:15] // bit reflection
+tmp2[31:0] := crc[0:31] // bit reflection
+tmp3[47:0] := tmp1[15:0] &lt;&lt; 32
+tmp4[47:0] := tmp2[31:0] &lt;&lt; 16
+tmp5[47:0] := tmp3[47:0] XOR tmp4[47:0]
+tmp6[31:0] := MOD2(tmp5[47:0], 0x11EDC6F41) // remainder from polynomial division modulus 2
+dst[31:0] := tmp6[0:31] // bit reflection
+ </operation>
+ <instruction name="CRC32" form="r32, r16" xed="CRC32_GPRyy_GPRv"/>
+ <header>nmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE4.2" name="_mm_crc32_u32">
+ <type>Integer</type>
+ <CPUID>SSE4.2</CPUID>
+ <category>Cryptography</category>
+ <return type="unsigned int" varname="dst" etype="UI32"/>
+ <parameter type="unsigned int" varname="crc" etype="UI32"/>
+ <parameter type="unsigned int" varname="v" etype="UI32"/>
+ <description>Starting with the initial value in "crc", accumulates a CRC32 value for unsigned 32-bit integer "v", and stores the result in "dst".</description>
+ <operation>tmp1[31:0] := v[0:31] // bit reflection
+tmp2[31:0] := crc[0:31] // bit reflection
+tmp3[63:0] := tmp1[31:0] &lt;&lt; 32
+tmp4[63:0] := tmp2[31:0] &lt;&lt; 32
+tmp5[63:0] := tmp3[63:0] XOR tmp4[63:0]
+tmp6[31:0] := MOD2(tmp5[63:0], 0x11EDC6F41) // remainder from polynomial division modulus 2
+dst[31:0] := tmp6[0:31] // bit reflection
+ </operation>
+ <instruction name="CRC32" form="r32, r32" xed="CRC32_GPRyy_GPRv"/>
+ <header>nmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSE4.2" name="_mm_crc32_u64">
+ <type>Integer</type>
+ <CPUID>SSE4.2</CPUID>
+ <category>Cryptography</category>
+ <return type="unsigned __int64" varname="dst" etype="UI64"/>
+ <parameter type="unsigned __int64" varname="crc" etype="UI64"/>
+ <parameter type="unsigned __int64" varname="v" etype="UI64"/>
+ <description>Starting with the initial value in "crc", accumulates a CRC32 value for unsigned 64-bit integer "v", and stores the result in "dst".</description>
+ <operation>tmp1[63:0] := v[0:63] // bit reflection
+tmp2[31:0] := crc[0:31] // bit reflection
+tmp3[95:0] := tmp1[63:0] &lt;&lt; 32
+tmp4[95:0] := tmp2[31:0] &lt;&lt; 64
+tmp5[95:0] := tmp3[95:0] XOR tmp4[95:0]
+tmp6[31:0] := MOD2(tmp5[95:0], 0x11EDC6F41) // remainder from polynomial division modulus 2
+dst[31:0] := tmp6[0:31] // bit reflection
+ </operation>
+ <instruction name="CRC32" form="r64, r64" xed="CRC32_GPRyy_GPRv"/>
+ <header>nmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSSE3" name="_mm_abs_pi8">
+ <type>Integer</type>
+ <CPUID>SSSE3</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m64" varname="dst" etype="UI8"/>
+ <parameter type="__m64" varname="a" etype="SI8"/>
+ <description>Compute the absolute value of packed signed 8-bit integers in "a", and store the unsigned results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*8
+ dst[i+7:i] := ABS(Int(a[i+7:i]))
+ENDFOR
+ </operation>
+ <instruction name="PABSB" form="mm, mm" xed="PABSB_MMXq_MMXq"/>
+ <header>tmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSSE3" vexEq="TRUE" name="_mm_abs_epi8">
+ <type>Integer</type>
+ <CPUID>SSSE3</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__m128i" varname="a" etype="SI8"/>
+ <description>Compute the absolute value of packed signed 8-bit integers in "a", and store the unsigned results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*8
+ dst[i+7:i] := ABS(a[i+7:i])
+ENDFOR
+ </operation>
+ <instruction name="PABSB" form="xmm, xmm" xed="PABSB_XMMdq_XMMdq"/>
+ <header>tmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSSE3" name="_mm_abs_pi16">
+ <type>Integer</type>
+ <CPUID>SSSE3</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m64" varname="dst" etype="UI16"/>
+ <parameter type="__m64" varname="a" etype="SI16"/>
+ <description>Compute the absolute value of packed signed 16-bit integers in "a", and store the unsigned results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*16
+ dst[i+15:i] := ABS(Int(a[i+15:i]))
+ENDFOR
+ </operation>
+ <instruction name="PABSW" form="mm, mm" xed="PABSW_MMXq_MMXq"/>
+ <header>tmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSSE3" vexEq="TRUE" name="_mm_abs_epi16">
+ <type>Integer</type>
+ <CPUID>SSSE3</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__m128i" varname="a" etype="SI16"/>
+ <description>Compute the absolute value of packed signed 16-bit integers in "a", and store the unsigned results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ dst[i+15:i] := ABS(a[i+15:i])
+ENDFOR
+ </operation>
+ <instruction name="PABSW" form="xmm, xmm" xed="PABSW_XMMdq_XMMdq"/>
+ <header>tmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSSE3" name="_mm_abs_pi32">
+ <type>Integer</type>
+ <CPUID>SSSE3</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m64" varname="dst" etype="UI32"/>
+ <parameter type="__m64" varname="a" etype="SI32"/>
+ <description>Compute the absolute value of packed signed 32-bit integers in "a", and store the unsigned results in "dst".</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*32
+ dst[i+31:i] := ABS(a[i+31:i])
+ENDFOR
+ </operation>
+ <instruction name="PABSD" form="mm, mm" xed="PABSD_MMXq_MMXq"/>
+ <header>tmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSSE3" vexEq="TRUE" name="_mm_abs_epi32">
+ <type>Integer</type>
+ <CPUID>SSSE3</CPUID>
+ <category>Special Math Functions</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="a" etype="SI32"/>
+ <description>Compute the absolute value of packed signed 32-bit integers in "a", and store the unsigned results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ dst[i+31:i] := ABS(a[i+31:i])
+ENDFOR
+ </operation>
+ <instruction name="PABSD" form="xmm, xmm" xed="PABSD_XMMdq_XMMdq"/>
+ <header>tmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSSE3" vexEq="TRUE" name="_mm_shuffle_epi8">
+ <type>Integer</type>
+ <CPUID>SSSE3</CPUID>
+ <category>Swizzle</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
+ <parameter type="__m128i" varname="b" etype="UI8"/>
+ <description>Shuffle packed 8-bit integers in "a" according to shuffle control mask in the corresponding 8-bit element of "b", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*8
+ IF b[i+7] == 1
+ dst[i+7:i] := 0
+ ELSE
+ index[3:0] := b[i+3:i]
+ dst[i+7:i] := a[index*8+7:index*8]
+ FI
+ENDFOR
+ </operation>
+ <instruction name="PSHUFB" form="xmm, xmm" xed="PSHUFB_XMMdq_XMMdq"/>
+ <header>tmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSSE3" name="_mm_shuffle_pi8">
+ <type>Integer</type>
+ <CPUID>SSSE3</CPUID>
+ <category>Swizzle</category>
+ <return type="__m64" varname="dst" etype="UI8"/>
+ <parameter type="__m64" varname="a" etype="UI8"/>
+ <parameter type="__m64" varname="b" etype="UI8"/>
+ <description>Shuffle packed 8-bit integers in "a" according to shuffle control mask in the corresponding 8-bit element of "b", and store the results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*8
+ IF b[i+7] == 1
+ dst[i+7:i] := 0
+ ELSE
+ index[2:0] := b[i+2:i]
+ dst[i+7:i] := a[index*8+7:index*8]
+ FI
+ENDFOR
+ </operation>
+ <instruction name="PSHUFB" form="mm, mm" xed="PSHUFB_MMXq_MMXq"/>
+ <header>tmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSSE3" vexEq="TRUE" name="_mm_alignr_epi8">
+ <type>Integer</type>
+ <CPUID>SSSE3</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
+ <parameter type="__m128i" varname="b" etype="UI8"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="5"/>
+ <description>Concatenate 16-byte blocks in "a" and "b" into a 32-byte temporary result, shift the result right by "imm8" bytes, and store the low 16 bytes in "dst".</description>
+ <operation>
+tmp[255:0] := ((a[127:0] &lt;&lt; 128)[255:0] OR b[127:0]) &gt;&gt; (imm8*8)
+dst[127:0] := tmp[127:0]
+ </operation>
+ <instruction name="PALIGNR" form="xmm, xmm, imm8" xed="PALIGNR_XMMdq_XMMdq_IMMb"/>
+ <header>tmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSSE3" name="_mm_alignr_pi8">
+ <type>Integer</type>
+ <CPUID>SSSE3</CPUID>
+ <category>Miscellaneous</category>
+ <return type="__m64" varname="dst" etype="UI8"/>
+ <parameter type="__m64" varname="a" etype="UI8"/>
+ <parameter type="__m64" varname="b" etype="UI8"/>
+ <parameter type="int" varname="imm8" etype="IMM" immwidth="4"/>
+ <description>Concatenate 8-byte blocks in "a" and "b" into a 16-byte temporary result, shift the result right by "imm8" bytes, and store the low 8 bytes in "dst".</description>
+ <operation>
+tmp[127:0] := ((a[63:0] &lt;&lt; 64)[127:0] OR b[63:0]) &gt;&gt; (imm8*8)
+dst[63:0] := tmp[63:0]
+ </operation>
+ <instruction name="PALIGNR" form="mm, mm, imm8" xed="PALIGNR_MMXq_MMXq_IMMb"/>
+ <header>tmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSSE3" vexEq="TRUE" name="_mm_hadd_epi16">
+ <type>Integer</type>
+ <CPUID>SSSE3</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="SI16"/>
+ <parameter type="__m128i" varname="a" etype="SI16"/>
+ <parameter type="__m128i" varname="b" etype="SI16"/>
+ <description>Horizontally add adjacent pairs of 16-bit integers in "a" and "b", and pack the signed 16-bit results in "dst".</description>
+ <operation>
+dst[15:0] := a[31:16] + a[15:0]
+dst[31:16] := a[63:48] + a[47:32]
+dst[47:32] := a[95:80] + a[79:64]
+dst[63:48] := a[127:112] + a[111:96]
+dst[79:64] := b[31:16] + b[15:0]
+dst[95:80] := b[63:48] + b[47:32]
+dst[111:96] := b[95:80] + b[79:64]
+dst[127:112] := b[127:112] + b[111:96]
+ </operation>
+ <instruction name="PHADDW" form="xmm, xmm" xed="PHADDW_XMMdq_XMMdq"/>
+ <header>tmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSSE3" vexEq="TRUE" name="_mm_hadds_epi16">
+ <type>Integer</type>
+ <CPUID>SSSE3</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="SI16"/>
+ <parameter type="__m128i" varname="a" etype="SI16"/>
+ <parameter type="__m128i" varname="b" etype="SI16"/>
+ <description>Horizontally add adjacent pairs of signed 16-bit integers in "a" and "b" using saturation, and pack the signed 16-bit results in "dst".</description>
+ <operation>
+dst[15:0] := Saturate16(a[31:16] + a[15:0])
+dst[31:16] := Saturate16(a[63:48] + a[47:32])
+dst[47:32] := Saturate16(a[95:80] + a[79:64])
+dst[63:48] := Saturate16(a[127:112] + a[111:96])
+dst[79:64] := Saturate16(b[31:16] + b[15:0])
+dst[95:80] := Saturate16(b[63:48] + b[47:32])
+dst[111:96] := Saturate16(b[95:80] + b[79:64])
+dst[127:112] := Saturate16(b[127:112] + b[111:96])
+ </operation>
+ <instruction name="PHADDSW" form="xmm, xmm" xed="PHADDSW_XMMdq_XMMdq"/>
+ <header>tmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSSE3" vexEq="TRUE" name="_mm_hadd_epi32">
+ <type>Integer</type>
+ <CPUID>SSSE3</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="SI32"/>
+ <parameter type="__m128i" varname="a" etype="SI32"/>
+ <parameter type="__m128i" varname="b" etype="SI32"/>
+ <description>Horizontally add adjacent pairs of 32-bit integers in "a" and "b", and pack the signed 32-bit results in "dst".</description>
+ <operation>
+dst[31:0] := a[63:32] + a[31:0]
+dst[63:32] := a[127:96] + a[95:64]
+dst[95:64] := b[63:32] + b[31:0]
+dst[127:96] := b[127:96] + b[95:64]
+ </operation>
+ <instruction name="PHADDD" form="xmm, xmm" xed="PHADDD_XMMdq_XMMdq"/>
+ <header>tmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSSE3" name="_mm_hadd_pi16">
+ <type>Integer</type>
+ <CPUID>SSSE3</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m64" varname="dst" etype="SI16"/>
+ <parameter type="__m64" varname="a" etype="SI16"/>
+ <parameter type="__m64" varname="b" etype="SI16"/>
+ <description>Horizontally add adjacent pairs of 16-bit integers in "a" and "b", and pack the signed 16-bit results in "dst".</description>
+ <operation>
+dst[15:0] := a[31:16] + a[15:0]
+dst[31:16] := a[63:48] + a[47:32]
+dst[47:32] := b[31:16] + b[15:0]
+dst[63:48] := b[63:48] + b[47:32]
+ </operation>
+ <instruction name="PHADDW" form="mm, mm" xed="PHADDW_MMXq_MMXq"/>
+ <header>tmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSSE3" name="_mm_hadd_pi32">
+ <type>Integer</type>
+ <CPUID>SSSE3</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m64" varname="dst" etype="SI32"/>
+ <parameter type="__m64" varname="a" etype="SI32"/>
+ <parameter type="__m64" varname="b" etype="SI32"/>
+ <description>Horizontally add adjacent pairs of 32-bit integers in "a" and "b", and pack the signed 32-bit results in "dst".</description>
+ <operation>
+dst[31:0] := a[63:32] + a[31:0]
+dst[63:32] := b[63:32] + b[31:0]
+ </operation>
+ <instruction name="PHADDD" form="mm, mm" xed="PHADDD_MMXq_MMXq"/>
+ <header>tmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSSE3" name="_mm_hadds_pi16">
+ <type>Integer</type>
+ <CPUID>SSSE3</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m64" varname="dst" etype="SI16"/>
+ <parameter type="__m64" varname="a" etype="SI16"/>
+ <parameter type="__m64" varname="b" etype="SI16"/>
+ <description>Horizontally add adjacent pairs of signed 16-bit integers in "a" and "b" using saturation, and pack the signed 16-bit results in "dst".</description>
+ <operation>
+dst[15:0] := Saturate16(a[31:16] + a[15:0])
+dst[31:16] := Saturate16(a[63:48] + a[47:32])
+dst[47:32] := Saturate16(b[31:16] + b[15:0])
+dst[63:48] := Saturate16(b[63:48] + b[47:32])
+ </operation>
+ <instruction name="PHADDSW" form="mm, mm" xed="PHADDSW_MMXq_MMXq"/>
+ <header>tmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSSE3" vexEq="TRUE" name="_mm_hsub_epi16">
+ <type>Integer</type>
+ <CPUID>SSSE3</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="SI16"/>
+ <parameter type="__m128i" varname="a" etype="SI16"/>
+ <parameter type="__m128i" varname="b" etype="SI16"/>
+ <description>Horizontally subtract adjacent pairs of 16-bit integers in "a" and "b", and pack the signed 16-bit results in "dst".</description>
+ <operation>
+dst[15:0] := a[15:0] - a[31:16]
+dst[31:16] := a[47:32] - a[63:48]
+dst[47:32] := a[79:64] - a[95:80]
+dst[63:48] := a[111:96] - a[127:112]
+dst[79:64] := b[15:0] - b[31:16]
+dst[95:80] := b[47:32] - b[63:48]
+dst[111:96] := b[79:64] - b[95:80]
+dst[127:112] := b[111:96] - b[127:112]
+ </operation>
+ <instruction name="PHSUBW" form="xmm, xmm" xed="PHSUBW_XMMdq_XMMdq"/>
+ <header>tmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSSE3" vexEq="TRUE" name="_mm_hsubs_epi16">
+ <type>Integer</type>
+ <CPUID>SSSE3</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="SI16"/>
+ <parameter type="__m128i" varname="a" etype="SI16"/>
+ <parameter type="__m128i" varname="b" etype="SI16"/>
+ <description>Horizontally subtract adjacent pairs of signed 16-bit integers in "a" and "b" using saturation, and pack the signed 16-bit results in "dst".</description>
+ <operation>
+dst[15:0] := Saturate16(a[15:0] - a[31:16])
+dst[31:16] := Saturate16(a[47:32] - a[63:48])
+dst[47:32] := Saturate16(a[79:64] - a[95:80])
+dst[63:48] := Saturate16(a[111:96] - a[127:112])
+dst[79:64] := Saturate16(b[15:0] - b[31:16])
+dst[95:80] := Saturate16(b[47:32] - b[63:48])
+dst[111:96] := Saturate16(b[79:64] - b[95:80])
+dst[127:112] := Saturate16(b[111:96] - b[127:112])
+ </operation>
+ <instruction name="PHSUBSW" form="xmm, xmm" xed="PHSUBSW_XMMdq_XMMdq"/>
+ <header>tmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSSE3" vexEq="TRUE" name="_mm_hsub_epi32">
+ <type>Integer</type>
+ <CPUID>SSSE3</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="SI32"/>
+ <parameter type="__m128i" varname="a" etype="SI32"/>
+ <parameter type="__m128i" varname="b" etype="SI32"/>
+ <description>Horizontally subtract adjacent pairs of 32-bit integers in "a" and "b", and pack the signed 32-bit results in "dst".</description>
+ <operation>
+dst[31:0] := a[31:0] - a[63:32]
+dst[63:32] := a[95:64] - a[127:96]
+dst[95:64] := b[31:0] - b[63:32]
+dst[127:96] := b[95:64] - b[127:96]
+ </operation>
+ <instruction name="PHSUBD" form="xmm, xmm" xed="PHSUBD_XMMdq_XMMdq"/>
+ <header>tmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSSE3" name="_mm_hsub_pi16">
+ <type>Integer</type>
+ <CPUID>SSSE3</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m64" varname="dst" etype="SI16"/>
+ <parameter type="__m64" varname="a" etype="SI16"/>
+ <parameter type="__m64" varname="b" etype="SI16"/>
+ <description>Horizontally subtract adjacent pairs of 16-bit integers in "a" and "b", and pack the signed 16-bit results in "dst".</description>
+ <operation>
+dst[15:0] := a[15:0] - a[31:16]
+dst[31:16] := a[47:32] - a[63:48]
+dst[47:32] := b[15:0] - b[31:16]
+dst[63:48] := b[47:32] - b[63:48]
+ </operation>
+ <instruction name="PHSUBW" form="mm, mm" xed="PHSUBW_MMXq_MMXq"/>
+ <header>tmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSSE3" name="_mm_hsub_pi32">
+ <type>Integer</type>
+ <CPUID>SSSE3</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m64" varname="dst" etype="SI32"/>
+ <parameter type="__m64" varname="a" etype="SI32"/>
+ <parameter type="__m64" varname="b" etype="SI32"/>
+ <description>Horizontally subtract adjacent pairs of 32-bit integers in "a" and "b", and pack the signed 32-bit results in "dst".</description>
+ <operation>
+dst[31:0] := a[31:0] - a[63:32]
+dst[63:32] := b[31:0] - b[63:32]
+ </operation>
+ <instruction name="PHSUBD" form="mm, mm" xed="PHSUBD_MMXq_MMXq"/>
+ <header>tmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSSE3" name="_mm_hsubs_pi16">
+ <type>Integer</type>
+ <CPUID>SSSE3</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m64" varname="dst" etype="SI16"/>
+ <parameter type="__m64" varname="a" etype="SI16"/>
+ <parameter type="__m64" varname="b" etype="SI16"/>
+ <description>Horizontally subtract adjacent pairs of signed 16-bit integers in "a" and "b" using saturation, and pack the signed 16-bit results in "dst".</description>
+ <operation>
+dst[15:0] := Saturate16(a[15:0] - a[31:16])
+dst[31:16] := Saturate16(a[47:32] - a[63:48])
+dst[47:32] := Saturate16(b[15:0] - b[31:16])
+dst[63:48] := Saturate16(b[47:32] - b[63:48])
+ </operation>
+ <instruction name="PHSUBSW" form="mm, mm" xed="PHSUBSW_MMXq_MMXq"/>
+ <header>tmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSSE3" vexEq="TRUE" name="_mm_maddubs_epi16">
+ <type>Integer</type>
+ <CPUID>SSSE3</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="SI16"/>
+ <parameter type="__m128i" varname="a" etype="UI8"/>
+ <parameter type="__m128i" varname="b" etype="SI8"/>
+ <description>Vertically multiply each unsigned 8-bit integer from "a" with the corresponding signed 8-bit integer from "b", producing intermediate signed 16-bit integers. Horizontally add adjacent pairs of intermediate signed 16-bit integers, and pack the saturated results in "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ dst[i+15:i] := Saturate16( a[i+15:i+8]*b[i+15:i+8] + a[i+7:i]*b[i+7:i] )
+ENDFOR
+ </operation>
+ <instruction name="PMADDUBSW" form="xmm, xmm" xed="PMADDUBSW_XMMdq_XMMdq"/>
+ <header>tmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSSE3" name="_mm_maddubs_pi16">
+ <type>Integer</type>
+ <CPUID>SSSE3</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m64" varname="dst" etype="SI16"/>
+ <parameter type="__m64" varname="a" etype="UI8"/>
+ <parameter type="__m64" varname="b" etype="SI8"/>
+ <description>Vertically multiply each unsigned 8-bit integer from "a" with the corresponding signed 8-bit integer from "b", producing intermediate signed 16-bit integers. Horizontally add adjacent pairs of intermediate signed 16-bit integers, and pack the saturated results in "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*16
+ dst[i+15:i] := Saturate16( a[i+15:i+8]*b[i+15:i+8] + a[i+7:i]*b[i+7:i] )
+ENDFOR
+ </operation>
+ <instruction name="PMADDUBSW" form="mm, mm" xed="PMADDUBSW_MMXq_MMXq"/>
+ <header>tmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSSE3" vexEq="TRUE" name="_mm_mulhrs_epi16">
+ <type>Integer</type>
+ <CPUID>SSSE3</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__m128i" varname="a" etype="SI16"/>
+ <parameter type="__m128i" varname="b" etype="SI16"/>
+ <description>Multiply packed signed 16-bit integers in "a" and "b", producing intermediate signed 32-bit integers. Truncate each intermediate integer to the 18 most significant bits, round by adding 1, and store bits [16:1] to "dst".</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ tmp[31:0] := ((SignExtend32(a[i+15:i]) * SignExtend32(b[i+15:i])) &gt;&gt; 14) + 1
+ dst[i+15:i] := tmp[16:1]
+ENDFOR
+ </operation>
+ <instruction name="PMULHRSW" form="xmm, xmm" xed="PMULHRSW_XMMdq_XMMdq"/>
+ <header>tmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSSE3" name="_mm_mulhrs_pi16">
+ <type>Integer</type>
+ <CPUID>SSSE3</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m64" varname="dst" etype="UI16"/>
+ <parameter type="__m64" varname="a" etype="SI16"/>
+ <parameter type="__m64" varname="b" etype="SI16"/>
+ <description>Multiply packed signed 16-bit integers in "a" and "b", producing intermediate signed 32-bit integers. Truncate each intermediate integer to the 18 most significant bits, round by adding 1, and store bits [16:1] to "dst".</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*16
+ tmp[31:0] := ((SignExtend32(a[i+15:i]) * SignExtend32(b[i+15:i])) &gt;&gt; 14) + 1
+ dst[i+15:i] := tmp[16:1]
+ENDFOR
+ </operation>
+ <instruction name="PMULHRSW" form="mm, mm" xed="PMULHRSW_MMXq_MMXq"/>
+ <header>tmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSSE3" vexEq="TRUE" name="_mm_sign_epi8">
+ <type>Integer</type>
+ <CPUID>SSSE3</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI8"/>
+ <parameter type="__m128i" varname="a" etype="SI8"/>
+ <parameter type="__m128i" varname="b" etype="SI8"/>
+ <description>Negate packed 8-bit integers in "a" when the corresponding signed 8-bit integer in "b" is negative, and store the results in "dst". Element in "dst" are zeroed out when the corresponding element in "b" is zero.</description>
+ <operation>
+FOR j := 0 to 15
+ i := j*8
+ IF b[i+7:i] &lt; 0
+ dst[i+7:i] := -(a[i+7:i])
+ ELSE IF b[i+7:i] == 0
+ dst[i+7:i] := 0
+ ELSE
+ dst[i+7:i] := a[i+7:i]
+ FI
+ENDFOR
+ </operation>
+ <instruction name="PSIGNB" form="xmm, xmm" xed="PSIGNB_XMMdq_XMMdq"/>
+ <header>tmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSSE3" vexEq="TRUE" name="_mm_sign_epi16">
+ <type>Integer</type>
+ <CPUID>SSSE3</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI16"/>
+ <parameter type="__m128i" varname="a" etype="SI16"/>
+ <parameter type="__m128i" varname="b" etype="SI16"/>
+ <description>Negate packed 16-bit integers in "a" when the corresponding signed 16-bit integer in "b" is negative, and store the results in "dst". Element in "dst" are zeroed out when the corresponding element in "b" is zero.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*16
+ IF b[i+15:i] &lt; 0
+ dst[i+15:i] := -(a[i+15:i])
+ ELSE IF b[i+15:i] == 0
+ dst[i+15:i] := 0
+ ELSE
+ dst[i+15:i] := a[i+15:i]
+ FI
+ENDFOR
+ </operation>
+ <instruction name="PSIGNW" form="xmm, xmm" xed="PSIGNW_XMMdq_XMMdq"/>
+ <header>tmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSSE3" vexEq="TRUE" name="_mm_sign_epi32">
+ <type>Integer</type>
+ <CPUID>SSSE3</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m128i" varname="dst" etype="UI32"/>
+ <parameter type="__m128i" varname="a" etype="SI32"/>
+ <parameter type="__m128i" varname="b" etype="SI32"/>
+ <description>Negate packed 32-bit integers in "a" when the corresponding signed 32-bit integer in "b" is negative, and store the results in "dst". Element in "dst" are zeroed out when the corresponding element in "b" is zero.</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*32
+ IF b[i+31:i] &lt; 0
+ dst[i+31:i] := -(a[i+31:i])
+ ELSE IF b[i+31:i] == 0
+ dst[i+31:i] := 0
+ ELSE
+ dst[i+31:i] := a[i+31:i]
+ FI
+ENDFOR
+ </operation>
+ <instruction name="PSIGND" form="xmm, xmm" xed="PSIGND_XMMdq_XMMdq"/>
+ <header>tmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSSE3" name="_mm_sign_pi8">
+ <type>Integer</type>
+ <CPUID>SSSE3</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m64" varname="dst" etype="UI8"/>
+ <parameter type="__m64" varname="a" etype="SI8"/>
+ <parameter type="__m64" varname="b" etype="SI8"/>
+ <description>Negate packed 8-bit integers in "a" when the corresponding signed 8-bit integer in "b" is negative, and store the results in "dst". Element in "dst" are zeroed out when the corresponding element in "b" is zero.</description>
+ <operation>
+FOR j := 0 to 7
+ i := j*8
+ IF b[i+7:i] &lt; 0
+ dst[i+7:i] := -(a[i+7:i])
+ ELSE IF b[i+7:i] == 0
+ dst[i+7:i] := 0
+ ELSE
+ dst[i+7:i] := a[i+7:i]
+ FI
+ENDFOR
+ </operation>
+ <instruction name="PSIGNB" form="mm, mm" xed="PSIGNB_MMXq_MMXq"/>
+ <header>tmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSSE3" name="_mm_sign_pi16">
+ <type>Integer</type>
+ <CPUID>SSSE3</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m64" varname="dst" etype="UI16"/>
+ <parameter type="__m64" varname="a" etype="SI16"/>
+ <parameter type="__m64" varname="b" etype="SI16"/>
+ <description>Negate packed 16-bit integers in "a" when the corresponding signed 16-bit integer in "b" is negative, and store the results in "dst". Element in "dst" are zeroed out when the corresponding element in "b" is zero.</description>
+ <operation>
+FOR j := 0 to 3
+ i := j*16
+ IF b[i+15:i] &lt; 0
+ dst[i+15:i] := -(a[i+15:i])
+ ELSE IF b[i+15:i] == 0
+ dst[i+15:i] := 0
+ ELSE
+ dst[i+15:i] := a[i+15:i]
+ FI
+ENDFOR
+ </operation>
+ <instruction name="PSIGNW" form="mm, mm" xed="PSIGNW_MMXq_MMXq"/>
+ <header>tmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="SSSE3" name="_mm_sign_pi32">
+ <type>Integer</type>
+ <CPUID>SSSE3</CPUID>
+ <category>Arithmetic</category>
+ <return type="__m64" varname="dst" etype="UI32"/>
+ <parameter type="__m64" varname="a" etype="SI32"/>
+ <parameter type="__m64" varname="b" etype="SI32"/>
+ <description>Negate packed 32-bit integers in "a" when the corresponding signed 32-bit integer in "b" is negative, and store the results in "dst". Element in "dst" are zeroed out when the corresponding element in "b" is zero.</description>
+ <operation>
+FOR j := 0 to 1
+ i := j*32
+ IF b[i+31:i] &lt; 0
+ dst[i+31:i] := -(a[i+31:i])
+ ELSE IF b[i+31:i] == 0
+ dst[i+31:i] := 0
+ ELSE
+ dst[i+31:i] := a[i+31:i]
+ FI
+ENDFOR
+ </operation>
+ <instruction name="PSIGND" form="mm, mm" xed="PSIGND_MMXq_MMXq"/>
+ <header>tmmintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_rdtsc">
+ <CPUID>TSC</CPUID>
+ <category>General Support</category>
+ <return type="__int64" varname="dst" etype="UI64"/>
+ <parameter type="void"/>
+ <description>Copy the current 64-bit value of the processor's time-stamp counter into "dst".</description>
+ <operation>dst[63:0] := TimeStampCounter
+ </operation>
+ <instruction name="RDTSC" xed="RDTSC"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_xsusldtrk">
+ <CPUID>TSXLDTRK</CPUID>
+ <category>Miscellaneous</category>
+ <return type="void"/>
+ <description>Mark the start of a TSX (HLE/RTM) suspend load address tracking region. If this is used inside a transactional region, subsequent loads are not added to the read set of the transaction. If this is used inside a suspend load address tracking region it will cause transaction abort. If this is used outside of a transactional region it behaves like a NOP.</description>
+ <instruction name="XSUSLDTRK" xed="XSUSLDTRK"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_xresldtrk">
+ <CPUID>TSXLDTRK</CPUID>
+ <category>Miscellaneous</category>
+ <return type="void"/>
+ <description>Mark the end of a TSX (HLE/RTM) suspend load address tracking region. If this is used inside a suspend load address tracking region it will end the suspend region and all following load addresses will be added to the transaction read set. If this is used inside an active transaction but not in a suspend region it will cause transaction abort. If this is used outside of a transactional region it behaves like a NOP.</description>
+ <instruction name="XRESLDTRK" xed="XRESLDTRK"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_mm256_aesenclast_epi128">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>VAES</CPUID>
+ <category>Cryptography</category>
+ <return type="__m256i" varname="dst" etype="M128"/>
+ <parameter type="__m256i" varname="a" etype="M128"/>
+ <parameter type="__m256i" varname="RoundKey" etype="M128"/>
+ <description>Perform the last round of an AES encryption flow on data (state) in "a" using the round key in "RoundKey", and store the results in "dst".</description>
+ <operation>FOR j := 0 to 1
+ i := j*128
+ a[i+127:i] := ShiftRows(a[i+127:i])
+ a[i+127:i] := SubBytes(a[i+127:i])
+ dst[i+127:i] := a[i+127:i] XOR RoundKey[i+127:i]
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VAESENCLAST" form="ymm, ymm" xed="VAESENCLAST_YMMu128_YMMu128_YMMu128"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_mm256_aesenc_epi128">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>VAES</CPUID>
+ <category>Cryptography</category>
+ <return type="__m256i" varname="dst" etype="M128"/>
+ <parameter type="__m256i" varname="a" etype="M128"/>
+ <parameter type="__m256i" varname="RoundKey" etype="M128"/>
+ <description>Perform one round of an AES encryption flow on data (state) in "a" using the round key in "RoundKey", and store the results in "dst".</description>
+ <operation>FOR j := 0 to 1
+ i := j*128
+ a[i+127:i] := ShiftRows(a[i+127:i])
+ a[i+127:i] := SubBytes(a[i+127:i])
+ a[i+127:i] := MixColumns(a[i+127:i])
+ dst[i+127:i] := a[i+127:i] XOR RoundKey[i+127:i]
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VAESENC" form="ymm, ymm" xed="VAESENC_YMMu128_YMMu128_YMMu128"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_mm256_aesdeclast_epi128">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>VAES</CPUID>
+ <category>Cryptography</category>
+ <return type="__m256i" varname="dst" etype="M128"/>
+ <parameter type="__m256i" varname="a" etype="M128"/>
+ <parameter type="__m256i" varname="RoundKey" etype="M128"/>
+ <description>Perform the last round of an AES decryption flow on data (state) in "a" using the round key in "RoundKey", and store the results in "dst".</description>
+ <operation>FOR j := 0 to 1
+ i := j*128
+ a[i+127:i] := InvShiftRows(a[i+127:i])
+ a[i+127:i] := InvSubBytes(a[i+127:i])
+ dst[i+127:i] := a[i+127:i] XOR RoundKey[i+127:i]
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VAESDECLAST" form="ymm, ymm" xed="VAESDECLAST_YMMu128_YMMu128_YMMu128"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_mm256_aesdec_epi128">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>VAES</CPUID>
+ <category>Cryptography</category>
+ <return type="__m256i" varname="dst" etype="M128"/>
+ <parameter type="__m256i" varname="a" etype="M128"/>
+ <parameter type="__m256i" varname="RoundKey" etype="M128"/>
+ <description>Perform one round of an AES decryption flow on data (state) in "a" using the round key in "RoundKey", and store the results in "dst".</description>
+ <operation>FOR j := 0 to 1
+ i := j*128
+ a[i+127:i] := InvShiftRows(a[i+127:i])
+ a[i+127:i] := InvSubBytes(a[i+127:i])
+ a[i+127:i] := InvMixColumns(a[i+127:i])
+ dst[i+127:i] := a[i+127:i] XOR RoundKey[i+127:i]
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VAESDEC" form="ymm, ymm" xed="VAESDEC_YMMu128_YMMu128_YMMu128"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_mm512_clmulepi64_epi128">
+ <type>Integer</type>
+ <CPUID>VPCLMULQDQ</CPUID>
+ <category>Application-Targeted</category>
+ <return type="__m512i" varname="dst" etype="M128"/>
+ <parameter type="__m512i" varname="b" etype="M128"/>
+ <parameter type="__m512i" varname="c" etype="M128"/>
+ <parameter type="const int" varname="Imm8" etype="IMM" immwidth="8"/>
+ <description>Carry-less multiplication of one quadword of
+ 'b' by one quadword of 'c', stores
+ the 128-bit result in 'dst'. The immediate 'Imm8' is
+ used to determine which quadwords of 'b'
+ and 'c' should be used.</description>
+ <operation>
+DEFINE PCLMUL128(X,Y) {
+ FOR i := 0 to 63
+ TMP[i] := X[ 0 ] and Y[ i ]
+ FOR j := 1 to i
+ TMP[i] := TMP[i] xor (X[ j ] and Y[ i - j ])
+ ENDFOR
+ DEST[ i ] := TMP[ i ]
+ ENDFOR
+ FOR i := 64 to 126
+ TMP[i] := 0
+ FOR j := i - 63 to 63
+ TMP[i] := TMP[i] xor (X[ j ] and Y[ i - j ])
+ ENDFOR
+ DEST[ i ] := TMP[ i ]
+ ENDFOR
+ DEST[127] := 0
+ RETURN DEST // 128b vector
+}
+FOR i := 0 to 3
+ IF Imm8[0] == 0
+ TEMP1 := b.m128[i].qword[0]
+ ELSE
+ TEMP1 := b.m128[i].qword[1]
+ FI
+ IF Imm8[4] == 0
+ TEMP2 := c.m128[i].qword[0]
+ ELSE
+ TEMP2 := c.m128[i].qword[1]
+ FI
+ dst.m128[i] := PCLMUL128(TEMP1, TEMP2)
+ENDFOR
+dst[MAX:512] := 0
+ </operation>
+ <instruction name="VPCLMULQDQ" form="zmm, zmm, zmm, imm8" xed="VPCLMULQDQ_ZMMu128_ZMMu64_ZMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_mm256_clmulepi64_epi128">
+ <type>Integer</type>
+ <CPUID>AVX512VL</CPUID>
+ <CPUID>VPCLMULQDQ</CPUID>
+ <category>Application-Targeted</category>
+ <return type="__m256i" varname="dst" etype="M128"/>
+ <parameter type="__m256i" varname="b" etype="M128"/>
+ <parameter type="__m256i" varname="c" etype="M128"/>
+ <parameter type="const int" varname="Imm8" etype="IMM" immwidth="8"/>
+ <description>Carry-less multiplication of one quadword of
+ 'b' by one quadword of 'c', stores
+ the 128-bit result in 'dst'. The immediate 'Imm8' is
+ used to determine which quadwords of 'b'
+ and 'c' should be used.</description>
+ <operation>
+DEFINE PCLMUL128(X,Y) {
+ FOR i := 0 to 63
+ TMP[i] := X[ 0 ] and Y[ i ]
+ FOR j := 1 to i
+ TMP[i] := TMP[i] xor (X[ j ] and Y[ i - j ])
+ ENDFOR
+ DEST[ i ] := TMP[ i ]
+ ENDFOR
+ FOR i := 64 to 126
+ TMP[i] := 0
+ FOR j := i - 63 to 63
+ TMP[i] := TMP[i] xor (X[ j ] and Y[ i - j ])
+ ENDFOR
+ DEST[ i ] := TMP[ i ]
+ ENDFOR
+ DEST[127] := 0
+ RETURN DEST // 128b vector
+}
+FOR i := 0 to 1
+ IF Imm8[0] == 0
+ TEMP1 := b.m128[i].qword[0]
+ ELSE
+ TEMP1 := b.m128[i].qword[1]
+ FI
+ IF Imm8[4] == 0
+ TEMP2 := c.m128[i].qword[0]
+ ELSE
+ TEMP2 := c.m128[i].qword[1]
+ FI
+ dst.m128[i] := PCLMUL128(TEMP1, TEMP2)
+ENDFOR
+dst[MAX:256] := 0
+ </operation>
+ <instruction name="VPCLMULQDQ" form="ymm, ymm, ymm, imm8" xed="VPCLMULQDQ_YMMu128_YMMu64_YMMu64_IMM8_AVX512"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_tpause">
+ <type>Flag</type>
+ <CPUID>WAITPKG</CPUID>
+ <category>Miscellaneous</category>
+ <return type="unsigned char" varname="dst" etype="UI8"/>
+ <parameter type="unsigned int" varname="ctrl" etype="UI32"/>
+ <parameter type="unsigned __int64" varname="counter" etype="UI64"/>
+ <description>Directs the processor to enter an implementation-dependent optimized state until the TSC reaches or exceeds the value specified in "counter". Bit 0 of "ctrl" selects between a lower power (cleared) or faster wakeup (set) optimized state. Returns the carry flag (CF).</description>
+ <instruction name="TPAUSE" form="r32" xed="TPAUSE_GPR32u32"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_umwait">
+ <type>Flag</type>
+ <CPUID>WAITPKG</CPUID>
+ <category>Miscellaneous</category>
+ <return type="unsigned char" varname="dst" etype="UI8"/>
+ <parameter type="unsigned int" varname="ctrl" etype="UI32"/>
+ <parameter type="unsigned __int64" varname="counter" etype="UI64"/>
+ <description>Directs the processor to enter an implementation-dependent optimized state while monitoring a range of addresses. The instruction wakes up when the TSC reaches or exceeds the value specified in "counter" (if the monitoring hardware did not trigger beforehand). Bit 0 of "ctrl" selects between a lower power (cleared) or faster wakeup (set) optimized state. Returns the carry flag (CF).</description>
+ <instruction name="UMWAIT" form="r32" xed="UMWAIT_GPR32"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_umonitor">
+ <CPUID>WAITPKG</CPUID>
+ <category>Miscellaneous</category>
+ <return type="void"/>
+ <parameter type="void*" varname="a"/>
+ <description>Sets up a linear address range to be
+ monitored by hardware and activates the
+ monitor. The address range should be a writeback
+ memory caching type. The address is
+ contained in "a".</description>
+ <instruction name="UMONITOR" form="r16/r32/r64" xed="UMONITOR_GPRa"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_wbnoinvd">
+ <CPUID>WBNOINVD</CPUID>
+ <category>Miscellaneous</category>
+ <return type="void"/>
+ <parameter type="void"/>
+ <description>Write back and do not flush internal caches.
+ Initiate writing-back without flushing of external
+ caches.</description>
+ <instruction name="WBNOINVD" xed="WBNOINVD"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_xgetbv">
+ <CPUID>XSAVE</CPUID>
+ <category>OS-Targeted</category>
+ <return type="unsigned __int64" varname="dst" etype="UI64"/>
+ <parameter type="unsigned int" varname="a" etype="UI32"/>
+ <description>Copy up to 64-bits from the value of the extended control register (XCR) specified by "a" into "dst". Currently only XFEATURE_ENABLED_MASK XCR is supported.</description>
+ <operation>dst[63:0] := XCR[a]
+ </operation>
+ <instruction name="XGETBV" xed="XGETBV"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_xrstor">
+ <CPUID>XSAVE</CPUID>
+ <category>OS-Targeted</category>
+ <return type="void"/>
+ <parameter type="void *" varname="mem_addr"/>
+ <parameter type="unsigned __int64" varname="rs_mask" etype="UI64"/>
+ <description>Perform a full or partial restore of the enabled processor states using the state information stored in memory at "mem_addr". State is restored based on bits [62:0] in "rs_mask", "XCR0", and "mem_addr.HEADER.XSTATE_BV". "mem_addr" must be aligned on a 64-byte boundary.</description>
+ <operation>st_mask := mem_addr.HEADER.XSTATE_BV[62:0]
+FOR i := 0 to 62
+ IF (rs_mask[i] AND XCR0[i])
+ IF st_mask[i]
+ CASE (i) OF
+ 0: ProcessorState[x87_FPU] := mem_addr.FPUSSESave_Area[FPU]
+ 1: ProcessorState[SSE] := mem_addr.FPUSSESaveArea[SSE]
+ DEFAULT: ProcessorState[i] := mem_addr.Ext_Save_Area[i]
+ ESAC
+ ELSE
+ // ProcessorExtendedState := Processor Supplied Values
+ CASE (i) OF
+ 1: MXCSR := mem_addr.FPUSSESave_Area[SSE]
+ ESAC
+ FI
+ FI
+ i := i + 1
+ENDFOR
+ </operation>
+ <instruction name="XRSTOR" form="m8" xed="XRSTOR_MEMmxsave"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_xrstor64">
+ <CPUID>XSAVE</CPUID>
+ <category>OS-Targeted</category>
+ <return type="void"/>
+ <parameter type="void *" varname="mem_addr"/>
+ <parameter type="unsigned __int64" varname="rs_mask" etype="UI64"/>
+ <description>Perform a full or partial restore of the enabled processor states using the state information stored in memory at "mem_addr". State is restored based on bits [62:0] in "rs_mask", "XCR0", and "mem_addr.HEADER.XSTATE_BV". "mem_addr" must be aligned on a 64-byte boundary.</description>
+ <operation>st_mask := mem_addr.HEADER.XSTATE_BV[62:0]
+FOR i := 0 to 62
+ IF (rs_mask[i] AND XCR0[i])
+ IF st_mask[i]
+ CASE (i) OF
+ 0: ProcessorState[x87_FPU] := mem_addr.FPUSSESave_Area[FPU]
+ 1: ProcessorState[SSE] := mem_addr.FPUSSESaveArea[SSE]
+ DEFAULT: ProcessorState[i] := mem_addr.Ext_Save_Area[i]
+ ESAC
+ ELSE
+ // ProcessorExtendedState := Processor Supplied Values
+ CASE (i) OF
+ 1: MXCSR := mem_addr.FPUSSESave_Area[SSE]
+ ESAC
+ FI
+ FI
+ i := i + 1
+ENDFOR
+ </operation>
+ <instruction name="XRSTOR64" form="m8" xed="XRSTOR64_MEMmxsave"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_xsave">
+ <CPUID>XSAVE</CPUID>
+ <category>OS-Targeted</category>
+ <return type="void"/>
+ <parameter type="void *" varname="mem_addr"/>
+ <parameter type="unsigned __int64" varname="save_mask" etype="UI64"/>
+ <description>Perform a full or partial save of the enabled processor states to memory at "mem_addr". State is saved based on bits [62:0] in "save_mask" and "XCR0". "mem_addr" must be aligned on a 64-byte boundary.</description>
+ <operation>mask[62:0] := save_mask[62:0] AND XCR0[62:0]
+FOR i := 0 to 62
+ IF mask[i]
+ CASE (i) OF
+ 0: mem_addr.FPUSSESave_Area[FPU] := ProcessorState[x87_FPU]
+ 1: mem_addr.FPUSSESaveArea[SSE] := ProcessorState[SSE]
+ DEFAULT: mem_addr.Ext_Save_Area[i] := ProcessorState[i]
+ ESAC
+ mem_addr.HEADER.XSTATE_BV[i] := INIT_FUNCTION[i]
+ FI
+ i := i + 1
+ENDFOR
+ </operation>
+ <instruction name="XSAVE" form="m8" xed="XSAVE_MEMmxsave"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_xsave64">
+ <CPUID>XSAVE</CPUID>
+ <category>OS-Targeted</category>
+ <return type="void"/>
+ <parameter type="void *" varname="mem_addr"/>
+ <parameter type="unsigned __int64" varname="save_mask" etype="UI64"/>
+ <description>Perform a full or partial save of the enabled processor states to memory at "mem_addr". State is saved based on bits [62:0] in "save_mask" and "XCR0". "mem_addr" must be aligned on a 64-byte boundary.</description>
+ <operation>mask[62:0] := save_mask[62:0] AND XCR0[62:0]
+FOR i := 0 to 62
+ IF mask[i]
+ CASE (i) OF
+ 0: mem_addr.FPUSSESave_Area[FPU] := ProcessorState[x87_FPU]
+ 1: mem_addr.FPUSSESaveArea[SSE] := ProcessorState[SSE]
+ DEFAULT: mem_addr.Ext_Save_Area[i] := ProcessorState[i]
+ ESAC
+ mem_addr.HEADER.XSTATE_BV[i] := INIT_FUNCTION[i]
+ FI
+ i := i + 1
+ENDFOR
+ </operation>
+ <instruction name="XSAVE64" form="m8" xed="XSAVE64_MEMmxsave"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_xsaveopt">
+ <CPUID>XSAVE</CPUID>
+ <CPUID>XSAVEOPT</CPUID>
+ <category>OS-Targeted</category>
+ <return type="void"/>
+ <parameter type="void *" varname="mem_addr"/>
+ <parameter type="unsigned __int64" varname="save_mask" etype="UI64"/>
+ <description>Perform a full or partial save of the enabled processor states to memory at "mem_addr". State is saved based on bits [62:0] in "save_mask" and "XCR0". "mem_addr" must be aligned on a 64-byte boundary. The hardware may optimize the manner in which data is saved. The performance of this instruction will be equal to or better than using the XSAVE instruction.</description>
+ <operation>mask[62:0] := save_mask[62:0] AND XCR0[62:0]
+FOR i := 0 to 62
+ IF mask[i]
+ CASE (i) OF
+ 0: mem_addr.FPUSSESave_Area[FPU] := ProcessorState[x87_FPU]
+ 1: mem_addr.FPUSSESaveArea[SSE] := ProcessorState[SSE]
+ 2: mem_addr.EXT_SAVE_Area2[YMM] := ProcessorState[YMM]
+ DEFAULT: mem_addr.Ext_Save_Area[i] := ProcessorState[i]
+ ESAC
+ mem_addr.HEADER.XSTATE_BV[i] := INIT_FUNCTION[i]
+ FI
+ i := i + 1
+ENDFOR
+ </operation>
+ <instruction name="XSAVEOPT" form="m8" xed="XSAVEOPT_MEMmxsave"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_xsaveopt64">
+ <CPUID>XSAVE</CPUID>
+ <CPUID>XSAVEOPT</CPUID>
+ <category>OS-Targeted</category>
+ <return type="void"/>
+ <parameter type="void *" varname="mem_addr"/>
+ <parameter type="unsigned __int64" varname="save_mask" etype="UI64"/>
+ <description>Perform a full or partial save of the enabled processor states to memory at "mem_addr". State is saved based on bits [62:0] in "save_mask" and "XCR0". "mem_addr" must be aligned on a 64-byte boundary. The hardware may optimize the manner in which data is saved. The performance of this instruction will be equal to or better than using the XSAVE64 instruction.</description>
+ <operation>mask[62:0] := save_mask[62:0] AND XCR0[62:0]
+FOR i := 0 to 62
+ IF mask[i]
+ CASE (i) OF
+ 0: mem_addr.FPUSSESave_Area[FPU] := ProcessorState[x87_FPU]
+ 1: mem_addr.FPUSSESaveArea[SSE] := ProcessorState[SSE]
+ 2: mem_addr.EXT_SAVE_Area2[YMM] := ProcessorState[YMM]
+ DEFAULT: mem_addr.Ext_Save_Area[i] := ProcessorState[i]
+ ESAC
+ mem_addr.HEADER.XSTATE_BV[i] := INIT_FUNCTION[i]
+ FI
+ i := i + 1
+ENDFOR
+ </operation>
+ <instruction name="XSAVEOPT64" form="m8" xed="XSAVEOPT64_MEMmxsave"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_xsetbv">
+ <CPUID>XSAVE</CPUID>
+ <category>OS-Targeted</category>
+ <return type="void"/>
+ <parameter type="unsigned int" varname="a" etype="UI32"/>
+ <parameter type="unsigned __int64" varname="val" etype="UI64"/>
+ <description>Copy 64-bits from "val" to the extended control register (XCR) specified by "a". Currently only XFEATURE_ENABLED_MASK XCR is supported.</description>
+ <operation>
+XCR[a] := val[63:0]
+ </operation>
+ <instruction name="XSETBV" xed="XSETBV"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_xsavec">
+ <CPUID>XSAVE</CPUID>
+ <CPUID>XSAVEC</CPUID>
+ <category>OS-Targeted</category>
+ <return type="void"/>
+ <parameter type="void *" varname="mem_addr"/>
+ <parameter type="unsigned __int64" varname="save_mask" etype="UI64"/>
+ <description>Perform a full or partial save of the enabled processor states to memory at "mem_addr"; xsavec differs from xsave in that it uses compaction and that it may use init optimization. State is saved based on bits [62:0] in "save_mask" and "XCR0". "mem_addr" must be aligned on a 64-byte boundary.</description>
+ <operation>mask[62:0] := save_mask[62:0] AND XCR0[62:0]
+FOR i := 0 to 62
+ IF mask[i]
+ CASE (i) OF
+ 0: mem_addr.FPUSSESave_Area[FPU] := ProcessorState[x87_FPU]
+ 1: mem_addr.FPUSSESaveArea[SSE] := ProcessorState[SSE]
+ DEFAULT: mem_addr.Ext_Save_Area[i] := ProcessorState[i]
+ ESAC
+ mem_addr.HEADER.XSTATE_BV[i] := INIT_FUNCTION[i]
+ FI
+ i := i + 1
+ENDFOR
+ </operation>
+ <instruction name="XSAVEC" form="m8" xed="XSAVEC_MEMmxsave"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_xsaves">
+ <CPUID>XSAVE</CPUID>
+ <CPUID>XSS</CPUID>
+ <category>OS-Targeted</category>
+ <return type="void"/>
+ <parameter type="void *" varname="mem_addr"/>
+ <parameter type="unsigned __int64" varname="save_mask" etype="UI64"/>
+ <description>Perform a full or partial save of the enabled processor states to memory at "mem_addr"; xsaves differs from xsave in that it can save state components corresponding to bits set in IA32_XSS MSR and that it may use the modified optimization. State is saved based on bits [62:0] in "save_mask" and "XCR0". "mem_addr" must be aligned on a 64-byte boundary.</description>
+ <operation>mask[62:0] := save_mask[62:0] AND XCR0[62:0]
+FOR i := 0 to 62
+ IF mask[i]
+ CASE (i) OF
+ 0: mem_addr.FPUSSESave_Area[FPU] := ProcessorState[x87_FPU]
+ 1: mem_addr.FPUSSESaveArea[SSE] := ProcessorState[SSE]
+ DEFAULT: mem_addr.Ext_Save_Area[i] := ProcessorState[i]
+ ESAC
+ mem_addr.HEADER.XSTATE_BV[i] := INIT_FUNCTION[i]
+ FI
+ i := i + 1
+ENDFOR
+ </operation>
+ <instruction name="XSAVES" form="m8" xed="XSAVES_MEMmxsave"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_xsavec64">
+ <CPUID>XSAVE</CPUID>
+ <CPUID>XSAVEC</CPUID>
+ <category>OS-Targeted</category>
+ <return type="void"/>
+ <parameter type="void *" varname="mem_addr"/>
+ <parameter type="unsigned __int64" varname="save_mask" etype="UI64"/>
+ <description>Perform a full or partial save of the enabled processor states to memory at "mem_addr"; xsavec differs from xsave in that it uses compaction and that it may use init optimization. State is saved based on bits [62:0] in "save_mask" and "XCR0". "mem_addr" must be aligned on a 64-byte boundary.</description>
+ <operation>mask[62:0] := save_mask[62:0] AND XCR0[62:0]
+FOR i := 0 to 62
+ IF mask[i]
+ CASE (i) OF
+ 0: mem_addr.FPUSSESave_Area[FPU] := ProcessorState[x87_FPU]
+ 1: mem_addr.FPUSSESaveArea[SSE] := ProcessorState[SSE]
+ DEFAULT: mem_addr.Ext_Save_Area[i] := ProcessorState[i]
+ ESAC
+ mem_addr.HEADER.XSTATE_BV[i] := INIT_FUNCTION[i]
+ FI
+ i := i + 1
+ENDFOR
+ </operation>
+ <instruction name="XSAVEC64" form="m8" xed="XSAVEC64_MEMmxsave"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_xsaves64">
+ <CPUID>XSAVE</CPUID>
+ <CPUID>XSS</CPUID>
+ <category>OS-Targeted</category>
+ <return type="void"/>
+ <parameter type="void *" varname="mem_addr"/>
+ <parameter type="unsigned __int64" varname="save_mask" etype="UI64"/>
+ <description>Perform a full or partial save of the enabled processor states to memory at "mem_addr"; xsaves differs from xsave in that it can save state components corresponding to bits set in IA32_XSS MSR and that it may use the modified optimization. State is saved based on bits [62:0] in "save_mask" and "XCR0". "mem_addr" must be aligned on a 64-byte boundary.</description>
+ <operation>mask[62:0] := save_mask[62:0] AND XCR0[62:0]
+FOR i := 0 to 62
+ IF mask[i]
+ CASE (i) OF
+ 0: mem_addr.FPUSSESave_Area[FPU] := ProcessorState[x87_FPU]
+ 1: mem_addr.FPUSSESaveArea[SSE] := ProcessorState[SSE]
+ DEFAULT: mem_addr.Ext_Save_Area[i] := ProcessorState[i]
+ ESAC
+ mem_addr.HEADER.XSTATE_BV[i] := INIT_FUNCTION[i]
+ FI
+ i := i + 1
+ENDFOR
+ </operation>
+	<instruction name="XSAVES64" form="m8" xed="XSAVES64_MEMmxsave"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_xrstors">
+ <CPUID>XSAVE</CPUID>
+ <CPUID>XSS</CPUID>
+ <category>OS-Targeted</category>
+ <return type="void"/>
+ <parameter type="const void *" varname="mem_addr"/>
+ <parameter type="unsigned __int64" varname="rs_mask" etype="UI64"/>
+ <description>Perform a full or partial restore of the enabled processor states using the state information stored in memory at "mem_addr". xrstors differs from xrstor in that it can restore state components corresponding to bits set in the IA32_XSS MSR; xrstors cannot restore from an xsave area in which the extended region is in the standard form. State is restored based on bits [62:0] in "rs_mask", "XCR0", and "mem_addr.HEADER.XSTATE_BV". "mem_addr" must be aligned on a 64-byte boundary.</description>
+ <operation>st_mask := mem_addr.HEADER.XSTATE_BV[62:0]
+FOR i := 0 to 62
+ IF (rs_mask[i] AND XCR0[i])
+ IF st_mask[i]
+ CASE (i) OF
+ 0: ProcessorState[x87_FPU] := mem_addr.FPUSSESave_Area[FPU]
+ 1: ProcessorState[SSE] := mem_addr.FPUSSESaveArea[SSE]
+ DEFAULT: ProcessorState[i] := mem_addr.Ext_Save_Area[i]
+ ESAC
+ ELSE
+ // ProcessorExtendedState := Processor Supplied Values
+ CASE (i) OF
+ 1: MXCSR := mem_addr.FPUSSESave_Area[SSE]
+ ESAC
+ FI
+ FI
+ i := i + 1
+ENDFOR
+ </operation>
+ <instruction name="XRSTORS" form="m8" xed="XRSTORS_MEMmxsave"/>
+ <header>immintrin.h</header>
+</intrinsic>
+<intrinsic tech="Other" name="_xrstors64">
+ <CPUID>XSAVE</CPUID>
+ <CPUID>XSS</CPUID>
+ <category>OS-Targeted</category>
+ <return type="void"/>
+ <parameter type="const void *" varname="mem_addr"/>
+ <parameter type="unsigned __int64" varname="rs_mask" etype="UI64"/>
+ <description>Perform a full or partial restore of the enabled processor states using the state information stored in memory at "mem_addr". xrstors differs from xrstor in that it can restore state components corresponding to bits set in the IA32_XSS MSR; xrstors cannot restore from an xsave area in which the extended region is in the standard form. State is restored based on bits [62:0] in "rs_mask", "XCR0", and "mem_addr.HEADER.XSTATE_BV". "mem_addr" must be aligned on a 64-byte boundary.</description>
+ <operation>st_mask := mem_addr.HEADER.XSTATE_BV[62:0]
+FOR i := 0 to 62
+ IF (rs_mask[i] AND XCR0[i])
+ IF st_mask[i]
+ CASE (i) OF
+ 0: ProcessorState[x87_FPU] := mem_addr.FPUSSESave_Area[FPU]
+ 1: ProcessorState[SSE] := mem_addr.FPUSSESaveArea[SSE]
+ DEFAULT: ProcessorState[i] := mem_addr.Ext_Save_Area[i]
+ ESAC
+ ELSE
+ // ProcessorExtendedState := Processor Supplied Values
+ CASE (i) OF
+ 1: MXCSR := mem_addr.FPUSSESave_Area[SSE]
+ ESAC
+ FI
+ FI
+ i := i + 1
+ENDFOR
+ </operation>
+ <instruction name="XRSTORS64" form="m8" xed="XRSTORS64_MEMmxsave"/>
+ <header>immintrin.h</header>
+</intrinsic>
+</intrinsics_list> \ No newline at end of file