@@ -118,6 +118,7 @@ static const struct isa_ext_data isa_edata_arr[] = {
ISA_EXT_DATA_ENTRY(zve64d, PRIV_VERSION_1_10_0, ext_zve64d),
ISA_EXT_DATA_ENTRY(zvfh, PRIV_VERSION_1_12_0, ext_zvfh),
ISA_EXT_DATA_ENTRY(zvfhmin, PRIV_VERSION_1_12_0, ext_zvfhmin),
+ ISA_EXT_DATA_ENTRY(zvkg, PRIV_VERSION_1_12_0, ext_zvkg),
ISA_EXT_DATA_ENTRY(zvkned, PRIV_VERSION_1_12_0, ext_zvkned),
ISA_EXT_DATA_ENTRY(zvknha, PRIV_VERSION_1_12_0, ext_zvknha),
ISA_EXT_DATA_ENTRY(zvknhb, PRIV_VERSION_1_12_0, ext_zvknhb),
@@ -1194,8 +1195,8 @@ void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp)
* In principle Zve*x would also suffice here, were they supported
* in qemu
*/
- if ((cpu->cfg.ext_zvbb || cpu->cfg.ext_zvkned || cpu->cfg.ext_zvknha ||
- cpu->cfg.ext_zvksh) && !cpu->cfg.ext_zve32f) {
+ if ((cpu->cfg.ext_zvbb || cpu->cfg.ext_zvkg || cpu->cfg.ext_zvkned ||
+ cpu->cfg.ext_zvknha || cpu->cfg.ext_zvksh) && !cpu->cfg.ext_zve32f) {
error_setg(errp,
"Vector crypto extensions require V or Zve* extensions");
return;
@@ -1710,6 +1711,7 @@ static Property riscv_cpu_extensions[] = {
/* Vector cryptography extensions */
DEFINE_PROP_BOOL("x-zvbb", RISCVCPU, cfg.ext_zvbb, false),
DEFINE_PROP_BOOL("x-zvbc", RISCVCPU, cfg.ext_zvbc, false),
+ DEFINE_PROP_BOOL("x-zvkg", RISCVCPU, cfg.ext_zvkg, false),
DEFINE_PROP_BOOL("x-zvkned", RISCVCPU, cfg.ext_zvkned, false),
DEFINE_PROP_BOOL("x-zvknha", RISCVCPU, cfg.ext_zvknha, false),
DEFINE_PROP_BOOL("x-zvknhb", RISCVCPU, cfg.ext_zvknhb, false),
@@ -85,6 +85,7 @@ struct RISCVCPUConfig {
bool ext_zve64d;
bool ext_zvbb;
bool ext_zvbc;
+ bool ext_zvkg;
bool ext_zvkned;
bool ext_zvknha;
bool ext_zvknhb;
@@ -1241,3 +1241,6 @@ DEF_HELPER_5(vsha2cl_vv, void, ptr, ptr, ptr, env, i32)
 
DEF_HELPER_5(vsm3me_vv, void, ptr, ptr, ptr, env, i32)
DEF_HELPER_5(vsm3c_vi, void, ptr, ptr, i32, env, i32)
+
+DEF_HELPER_5(vghsh_vv, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_4(vgmul_vv, void, ptr, ptr, env, i32)
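
The arity difference between the two declarations is deliberate: vghsh.vv reads two source vectors, so its helper takes vd, vs1, vs2, env and desc (DEF_HELPER_5), whereas vgmul.vv is the destructive multiply vd = vd * vs2 with no vs1 operand, hence DEF_HELPER_4.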
@@ -957,3 +957,7 @@ vsha2cl_vv 101111 1 ..... ..... 010 ..... 1110111 @r_vm_1
# *** Zvksh vector crypto extension ***
vsm3me_vv 100000 1 ..... ..... 010 ..... 1110111 @r_vm_1
vsm3c_vi 101011 1 ..... ..... 010 ..... 1110111 @r_vm_1
+
+# *** Zvkg vector crypto extension ***
+vghsh_vv 101100 1 ..... ..... 010 ..... 1110111 @r_vm_1
+vgmul_vv 101000 1 ..... 10001 010 ..... 1110111 @r2_vm_1
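
vghsh.vv gets its own funct6 (101100) in the standard three-operand @r_vm_1 format. vgmul.vv has no rs1 operand: the fixed 10001 pattern in the rs1 field acts as an opcode extension within the 101000 space, which is why it decodes through @r2_vm_1.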
@@ -510,3 +510,33 @@ static inline bool vsm3c_check(DisasContext *s, arg_rmrr *a)
 
GEN_VV_UNMASKED_TRANS(vsm3me_vv, vsm3me_check, ZVKSH_EGS)
GEN_VI_UNMASKED_TRANS(vsm3c_vi, vsm3c_check, ZVKSH_EGS)
+
+/*
+ * Zvkg
+ */
+
+#define ZVKG_EGS 4
+
+static bool vgmul_check(DisasContext *s, arg_rmr *a)
+{
+ int egw_bytes = ZVKG_EGS << s->sew;
+ return s->cfg_ptr->ext_zvkg == true &&
+ vext_check_isa_ill(s) &&
+ require_rvv(s) &&
+ MAXSZ(s) >= egw_bytes &&
+ vext_check_ss(s, a->rd, a->rs2, a->vm) &&
+ s->sew == MO_32;
+}
+
+GEN_V_UNMASKED_TRANS(vgmul_vv, vgmul_check, ZVKG_EGS)
+
+static bool vghsh_check(DisasContext *s, arg_rmrr *a)
+{
+ int egw_bytes = ZVKG_EGS << s->sew;
+ return s->cfg_ptr->ext_zvkg == true &&
+ opivv_check(s, a) &&
+ MAXSZ(s) >= egw_bytes &&
+ s->sew == MO_32;
+}
+
+GEN_VV_UNMASKED_TRANS(vghsh_vv, vghsh_check, ZVKG_EGS)
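
Both predicates pin SEW to 32 bits and require that a vector register can hold at least one full element group: with ZVKG_EGS = 4 and s->sew == MO_32 (i.e. 2), egw_bytes = 4 << 2 = 16 bytes, the 128-bit EGW that GHASH operates on. vghsh_check() can reuse opivv_check() wholesale; vgmul_check(), having no rs1 to validate, assembles the equivalent legality test by hand from require_rvv(), vext_check_isa_ill() and vext_check_ss().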
@@ -752,3 +752,75 @@ void HELPER(vsm3c_vi)(void *vd_vptr, void *vs2_vptr, uint32_t uimm,
vext_set_elems_1s(vd_vptr, vta, env->vl * esz, total_elems * esz);
env->vstart = 0;
}
+
+void HELPER(vghsh_vv)(void *vd_vptr, void *vs1_vptr, void *vs2_vptr,
+ CPURISCVState *env, uint32_t desc)
+{
+ uint64_t *vd = vd_vptr;
+ uint64_t *vs1 = vs1_vptr;
+ uint64_t *vs2 = vs2_vptr;
+ uint32_t vta = vext_vta(desc);
+ uint32_t total_elems = vext_get_total_elems(env, desc, 4);
+
+ for (uint32_t i = env->vstart / 4; i < env->vl / 4; i++) {
+ uint64_t Y[2] = {vd[i * 2 + 0], vd[i * 2 + 1]};
+ uint64_t H[2] = {brev8(vs2[i * 2 + 0]), brev8(vs2[i * 2 + 1])};
+ uint64_t X[2] = {vs1[i * 2 + 0], vs1[i * 2 + 1]};
+ uint64_t Z[2] = {0, 0};
+
+ uint64_t S[2] = {brev8(Y[0] ^ X[0]), brev8(Y[1] ^ X[1])};
+
+ for (uint32_t j = 0; j < 128; j++) {
+ if ((S[j / 64] >> (j % 64)) & 1) {
+ Z[0] ^= H[0];
+ Z[1] ^= H[1];
+ }
+ bool reduce = ((H[1] >> 63) & 1);
+ H[1] = H[1] << 1 | H[0] >> 63;
+ H[0] = H[0] << 1;
+ if (reduce) {
+ H[0] ^= 0x87;
+ }
+ }
+
+ vd[i * 2 + 0] = brev8(Z[0]);
+ vd[i * 2 + 1] = brev8(Z[1]);
+ }
+ /* set tail elements to 1s */
+ vext_set_elems_1s(vd, vta, env->vl * 4, total_elems * 4);
+ env->vstart = 0;
+}
+
+void HELPER(vgmul_vv)(void *vd_vptr, void *vs2_vptr, CPURISCVState *env,
+ uint32_t desc)
+{
+ uint64_t *vd = vd_vptr;
+ uint64_t *vs2 = vs2_vptr;
+ uint32_t vta = vext_vta(desc);
+ uint32_t total_elems = vext_get_total_elems(env, desc, 4);
+
+ for (uint32_t i = env->vstart / 4; i < env->vl / 4; i++) {
+ uint64_t Y[2] = {brev8(vd[i * 2 + 0]), brev8(vd[i * 2 + 1])};
+ uint64_t H[2] = {brev8(vs2[i * 2 + 0]), brev8(vs2[i * 2 + 1])};
+ uint64_t Z[2] = {0, 0};
+
+ for (uint32_t j = 0; j < 128; j++) {
+ if ((Y[j / 64] >> (j % 64)) & 1) {
+ Z[0] ^= H[0];
+ Z[1] ^= H[1];
+ }
+ bool reduce = ((H[1] >> 63) & 1);
+ H[1] = H[1] << 1 | H[0] >> 63;
+ H[0] = H[0] << 1;
+ if (reduce) {
+ H[0] ^= 0x87;
+ }
+ }
+
+ vd[i * 2 + 0] = brev8(Z[0]);
+ vd[i * 2 + 1] = brev8(Z[1]);
+ }
+ /* set tail elements to 1s */
+ vext_set_elems_1s(vd, vta, env->vl * 4, total_elems * 4);
+ env->vstart = 0;
+}
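
Semantically, vghsh.vv performs one GHASH step, vd = (vd ^ vs1) * H with the hash subkey H taken from vs2, and vgmul.vv is the bare multiply vd = vd * vs2. Both share the same inner loop: a 128-step shift-and-add multiply in GF(2^128) over brev8()-ed (bit-reversed within each byte) operands, reducing by 0x87 whenever a bit shifts out the top; 0x87 is the byte-reflected image of the GCM polynomial x^128 + x^7 + x^2 + x + 1, since brev8(0xE1) == 0x87. The loop lifts straight into a standalone host program if you want to sanity-check the reduction in isolation. The sketch below is illustrative and not part of the patch: brev8() is a local stand-in for QEMU's helper of the same name, and the test only verifies that the field's multiplicative identity (the GCM byte string 80 00 .. 00, i.e. {1, 0} after brev8) leaves H unchanged.

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for QEMU's brev8(): reverse the bits within each byte. */
    static uint64_t brev8(uint64_t x)
    {
        x = ((x & 0x5555555555555555ull) << 1) | ((x >> 1) & 0x5555555555555555ull);
        x = ((x & 0x3333333333333333ull) << 2) | ((x >> 2) & 0x3333333333333333ull);
        x = ((x & 0x0f0f0f0f0f0f0f0full) << 4) | ((x >> 4) & 0x0f0f0f0f0f0f0f0full);
        return x;
    }

    /* Z = S * H in GF(2^128); operands already brev8()-ed, as in the helpers. */
    static void gf128_mul(uint64_t Z[2], const uint64_t S[2], const uint64_t Hin[2])
    {
        uint64_t H[2] = { Hin[0], Hin[1] };

        Z[0] = Z[1] = 0;
        for (uint32_t j = 0; j < 128; j++) {
            if ((S[j / 64] >> (j % 64)) & 1) {
                Z[0] ^= H[0];
                Z[1] ^= H[1];
            }
            bool reduce = (H[1] >> 63) & 1;
            H[1] = H[1] << 1 | H[0] >> 63;
            H[0] = H[0] << 1;
            if (reduce) {
                H[0] ^= 0x87;   /* x^7 + x^2 + x + 1: reflected GCM reduction */
            }
        }
    }

    int main(void)
    {
        uint64_t one[2] = { 1, 0 };   /* brev8() image of GCM's identity */
        uint64_t h[2] = { 0x0123456789abcdefull, 0xfedcba9876543210ull };
        uint64_t z[2];

        gf128_mul(z, one, h);
        assert(z[0] == h[0] && z[1] == h[1]);
        printf("1 * H == H: ok\n");
        return 0;
    }

A failure here would implicate the reduction constant or the shift direction rather than the vector plumbing (vstart/vl element-group iteration, tail-agnostic handling) that the helpers layer on top.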