Import of kelvin-sim using copybara.
Included changes:
- 812935805 Add ELF program loading to Kelvin cosim DPI. by Shodan Team <no-reply@google.com>
- 812914930 BEGIN_PUBLIC by Shodan Team <no-reply@google.com>
- 812914256 Copybara change to remove absl_nonnull and absl_nullable ... by Shodan Team <no-reply@google.com>
- 811428631 Update copybara to correctly format BUILD files with exte... by Shodan Team <no-reply@google.com>
- 807862159 Update MPACT-RiscV to latest GH version. by Shodan Team <no-reply@google.com>
- 805040233 BEGIN_PUBLIC by Shodan Team <no-reply@google.com>
- 804932065 BEGIN_PUBLIC by Shodan Team <no-reply@google.com>
- 804640536 BEGIN_PUBLIC by Shodan Team <no-reply@google.com>
- 800990917 BEGIN_PUBLIC by Shodan Team <no-reply@google.com>
- 799665499 BEGIN_PUBLIC by Shodan Team <no-reply@google.com>
- 794735862 Remove protobuf dependency for external GoB setup by Shodan Team <no-reply@google.com>
PiperOrigin-RevId: 812935805
Change-Id: Id034c56599a34e5411d92caad09c755f00623f95
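A minimal usage sketch of the new DPI entry points, for reference (the include path, the ELF path, and the plain-C++ harness around the calls are illustrative assumptions; stepping of the simulation is driven by the cosim testbench and is not shown in this change):

  #include <stdint.h>
  #include <stdio.h>

  #include "sim/cosim/kelvin_cosim_dpi.h"

  int main() {
    // Bring up the simulator, then load an ELF image into simulation memory.
    if (mpact_init() != 0) return 1;
    if (mpact_load_program("/path/to/program.elf") != 0) return 1;

    // ... the cosim testbench advances the simulation here (not shown) ...

    // Registers are now read by name through a single accessor.
    uint32_t pc = 0;
    if (mpact_get_register("pc", &pc) == 0) {
      printf("pc = 0x%08x\n", pc);
    }

    return mpact_fini();
  }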
diff --git a/WORKSPACE b/WORKSPACE
index 8fc3a69..caa3e7b 100644
--- a/WORKSPACE
+++ b/WORKSPACE
@@ -6,9 +6,9 @@
# MPACT-RiscV repo
http_archive(
name = "com_google_mpact-riscv",
- sha256 = "4e24df1e0b41f1ba04c8f72b1abd3d82b71e4517fa2fcd54c103134f535c0db6",
- strip_prefix = "mpact-riscv-92597b9bc9f07f7dedc0d380af70dbc3cf595339",
- url = "https://github.com/google/mpact-riscv/archive/92597b9bc9f07f7dedc0d380af70dbc3cf595339.tar.gz",
+ sha256 = "7accadc80be5e3a57723992aafd825d43e29bfeb6646cd111eb3a7134c2d5705",
+ strip_prefix = "mpact-riscv-925b2e06f2820db0012fa78b312a615286ac0911",
+ url = "https://github.com/google/mpact-riscv/archive/925b2e06f2820db0012fa78b312a615286ac0911.tar.gz",
)
# Download only the single svdpi.h file.
diff --git a/sim/BUILD b/sim/BUILD
index c2c915e..62400ee 100644
--- a/sim/BUILD
+++ b/sim/BUILD
@@ -213,3 +213,101 @@
"@com_googlesource_code_re2//:re2",
],
)
+
+cc_library(
+ name = "kelvin_v2_state",
+ srcs = [
+ "kelvin_v2_state.cc",
+ ],
+ hdrs = [
+ "kelvin_v2_state.h",
+ ],
+ copts = [
+ "-O3",
+ ],
+ deps = [
+ "@com_google_absl//absl/base:nullability",
+ "@com_google_absl//absl/strings",
+ "@com_google_mpact-riscv//riscv:riscv_state",
+ "@com_google_mpact-sim//mpact/sim/util/memory",
+ ],
+)
+
+mpact_isa_decoder(
+ name = "kelvin_v2_isa",
+ src = "kelvin_v2.isa",
+ includes = [
+ "@com_google_mpact-riscv//riscv:riscv32g.isa",
+ "@com_google_mpact-riscv//riscv:riscv32zb.isa",
+ "@com_google_mpact-riscv//riscv:riscv_vector.isa",
+ ],
+ isa_name = "KelvinV2",
+ deps = [
+ "@com_google_absl//absl/functional:bind_front",
+ "@com_google_mpact-riscv//riscv:riscv_bitmanip_instructions",
+ "@com_google_mpact-riscv//riscv:riscv_g",
+ "@com_google_mpact-riscv//riscv:riscv_v",
+ ],
+)
+
+mpact_bin_fmt_decoder(
+ name = "kelvin_v2_bin_fmt",
+ src = "kelvin_v2.bin_fmt",
+ decoder_name = "KelvinV2",
+ includes = [
+ "@com_google_mpact-riscv//riscv:riscv32g.bin_fmt",
+ "@com_google_mpact-riscv//riscv:riscv_format16.bin_fmt",
+ "@com_google_mpact-riscv//riscv:riscv_format32.bin_fmt",
+ "@com_google_mpact-riscv//riscv:riscv_vector.bin_fmt",
+ ],
+ deps = [
+ ":kelvin_v2_isa",
+ ],
+)
+
+cc_library(
+ name = "kelvin_v2_user_decoder",
+ srcs = [
+ "kelvin_v2_encoding.cc",
+ "kelvin_v2_user_decoder.cc",
+ ],
+ hdrs = [
+ "kelvin_v2_encoding.h",
+ "kelvin_v2_user_decoder.h",
+ ],
+ copts = ["-O3"],
+ deps = [
+ ":kelvin_v2_bin_fmt",
+ ":kelvin_v2_getters",
+ ":kelvin_v2_isa",
+ ":kelvin_v2_state",
+ "@com_google_absl//absl/base:nullability",
+ "@com_google_absl//absl/container:flat_hash_map",
+ "@com_google_absl//absl/functional:any_invocable",
+ "@com_google_mpact-riscv//riscv:riscv_encoding_common",
+ "@com_google_mpact-riscv//riscv:riscv_state",
+ "@com_google_mpact-sim//mpact/sim/generic:arch_state",
+ "@com_google_mpact-sim//mpact/sim/generic:core",
+ "@com_google_mpact-sim//mpact/sim/generic:instruction",
+ "@com_google_mpact-sim//mpact/sim/generic:program_error",
+ "@com_google_mpact-sim//mpact/sim/generic:type_helpers",
+ "@com_google_mpact-sim//mpact/sim/util/memory",
+ ],
+)
+
+cc_library(
+ name = "kelvin_v2_getters",
+ hdrs = ["kelvin_v2_getters.h"],
+ deps = [
+ "@com_google_absl//absl/base:nullability",
+ "@com_google_absl//absl/container:flat_hash_map",
+ "@com_google_absl//absl/functional:any_invocable",
+ "@com_google_absl//absl/status:statusor",
+ "@com_google_absl//absl/strings",
+ "@com_google_mpact-riscv//riscv:riscv_encoding_common",
+ "@com_google_mpact-riscv//riscv:riscv_getters",
+ "@com_google_mpact-riscv//riscv:riscv_state",
+ "@com_google_mpact-sim//mpact/sim/generic:core",
+ "@com_google_mpact-sim//mpact/sim/generic:type_helpers",
+ ],
+)
diff --git a/sim/cosim/BUILD b/sim/cosim/BUILD
index cf69fca..4e00b57 100644
--- a/sim/cosim/BUILD
+++ b/sim/cosim/BUILD
@@ -34,16 +34,18 @@
],
visibility = ["//visibility:public"],
deps = [
+ "//sim:kelvin_v2_state",
+ "//sim:kelvin_v2_user_decoder",
"@com_google_absl//absl/log",
"@com_google_absl//absl/log:check",
+ "@com_google_absl//absl/status",
"@com_google_absl//absl/strings",
- "@com_google_mpact-riscv//riscv:riscv32g_vec_decoder",
- "@com_google_mpact-riscv//riscv:riscv32gv_isa",
"@com_google_mpact-riscv//riscv:riscv_fp_state",
"@com_google_mpact-riscv//riscv:riscv_state",
"@com_google_mpact-riscv//riscv:riscv_top",
"@com_google_mpact-sim//mpact/sim/generic:core",
"@com_google_mpact-sim//mpact/sim/util/memory",
+ "@com_google_mpact-sim//mpact/sim/util/program_loader:elf_loader",
],
alwayslink = True,
)
diff --git a/sim/cosim/kelvin_cosim_dpi.h b/sim/cosim/kelvin_cosim_dpi.h
index 5e1c4db..1830e45 100644
--- a/sim/cosim/kelvin_cosim_dpi.h
+++ b/sim/cosim/kelvin_cosim_dpi.h
@@ -37,6 +37,10 @@
// Return 0 on success, non-zero on failure.
int mpact_init();
+// Loads an ELF file into the simulation memory.
+// Return 0 on success, non-zero on failure.
+int mpact_load_program(const char* elf_file);
+
// Reset the MPACT simulator's architectural state.
// Return 0 on success, non-zero on failure.
int mpact_reset();
@@ -51,18 +55,11 @@
// Currently unimplemented and always returns false.
bool mpact_is_halted();
-// Return the current value of the Program Counter (PC).
-// On error, returns 0 and logs an error.
-uint32_t mpact_get_pc();
-
-// Return the value of the specified GPR. GPRs are selected by their index,
-// where 0 is x0, 1 is x1, and so on.
-// On error, returns 0 and logs an error.
-uint32_t mpact_get_gpr(uint32_t index);
-
-// Return the value of the specified CSR. CSRs are selected by their address.
-// On error, returns 0 and logs an error.
-uint32_t mpact_get_csr(uint32_t address);
+// Return the value of the register specified by name. Register names are
+// provided as null-terminated C-style strings.
+// Return 0 on success, non-zero on failure.
+// On success, the register value is written to the value argument.
+int mpact_get_register(const char* name, uint32_t* value);
// Finalize and clean up MPACT simulator resources.
// Return 0 on success, non-zero on failure.
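For callers of the removed getters, equivalent reads through the new name-based accessor look roughly as follows (a sketch only; the x-register names and their ABI aliases are registered by the wrapper in this change, while reading CSRs such as "mstatus" by name depends on what RiscVTop::ReadRegister resolves, which this diff does not confirm):

  uint32_t value = 0;

  // Previously: value = mpact_get_pc();
  if (mpact_get_register("pc", &value) != 0) { /* handle error */ }

  // Previously: value = mpact_get_gpr(2);
  // The wrapper registers "x2" as well as its ABI alias "sp".
  if (mpact_get_register("x2", &value) != 0) { /* handle error */ }

  // Previously: value = mpact_get_csr(address); CSRs would now be requested
  // by name, assuming RiscVTop::ReadRegister exposes them.
  if (mpact_get_register("mstatus", &value) != 0) { /* handle error */ }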
diff --git a/sim/cosim/kelvin_cosim_dpi_wrapper.cc b/sim/cosim/kelvin_cosim_dpi_wrapper.cc
index 3a5d7c3..ca1ee22 100644
--- a/sim/cosim/kelvin_cosim_dpi_wrapper.cc
+++ b/sim/cosim/kelvin_cosim_dpi_wrapper.cc
@@ -16,11 +16,12 @@
#include <memory>
#include <string>
+#include "sim/kelvin_v2_state.h"
+#include "sim/kelvin_v2_user_decoder.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
+#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
-#include "riscv/riscv32g_vec_decoder.h"
-#include "riscv/riscv_csr.h"
#include "riscv/riscv_fp_state.h"
#include "riscv/riscv_register.h"
#include "riscv/riscv_register_aliases.h"
@@ -30,6 +31,7 @@
#include "mpact/sim/generic/decoder_interface.h"
#include "mpact/sim/util/memory/flat_demand_memory.h"
#include "mpact/sim/util/memory/memory_interface.h"
+#include "mpact/sim/util/program_loader/elf_program_loader.h"
#include "external/svdpi_h_file/file/svdpi.h"
// Include the DPI-C contract header.
@@ -39,9 +41,11 @@
constexpr uint32_t kKelvinStartAddress = 0;
namespace {
+using ::kelvin::sim::KelvinV2State;
+using ::kelvin::sim::KelvinV2UserDecoder;
using ::mpact::sim::generic::DecoderInterface;
+using ::mpact::sim::riscv::kFRegisterAliases;
using ::mpact::sim::riscv::kXRegisterAliases;
-using ::mpact::sim::riscv::RiscV32GVecDecoder;
using ::mpact::sim::riscv::RiscVFPState;
using ::mpact::sim::riscv::RiscVState;
using ::mpact::sim::riscv::RiscVTop;
@@ -49,6 +53,7 @@
using ::mpact::sim::riscv::RiscVXlen;
using ::mpact::sim::riscv::RV32Register;
using ::mpact::sim::riscv::RVFpRegister;
+using ::mpact::sim::util::ElfProgramLoader;
using ::mpact::sim::util::FlatDemandMemory;
using ::mpact::sim::util::MemoryInterface;
@@ -56,69 +61,85 @@
public:
MpactHandle()
: memory_(std::make_unique<FlatDemandMemory>()),
- rv_state_(CreateRVState(memory_.get())),
- rv_fp_state_(CreateFPState(rv_state_.get())),
- rvv_state_(CreateVectorState(rv_state_.get())),
- rv_decoder_(CreateDecoder(rv_state_.get(), memory_.get())),
- rv_top_(CreateRiscVTop(rv_state_.get(), rv_decoder_.get())) {
+ state_(CreateState(memory_.get())),
+ rv_fp_state_(CreateFPState(state_.get())),
+ rvv_state_(CreateVectorState(state_.get())),
+ rv_decoder_(CreateDecoder(state_.get(), memory_.get())),
+ rv_top_(CreateRiscVTop(state_.get(), rv_decoder_.get())),
+ elf_loader_(std::make_unique<ElfProgramLoader>(memory_.get())) {
absl::Status pc_write = rv_top_->WriteRegister("pc", kKelvinStartAddress);
CHECK_OK(pc_write) << "Error writing to pc.";
}
+ absl::Status load_program(const std::string& elf_file) {
+ auto load_result = elf_loader_->LoadProgram(elf_file);
+ if (!load_result.ok()) {
+ return absl::InternalError(
+ absl::StrCat("Failed to load program '", elf_file,
+ "': ", load_result.status().message()));
+ }
+ return absl::OkStatus();
+ }
+
uint32_t get_pc() {
absl::StatusOr<uint64_t> read_reg_status = rv_top_->ReadRegister("pc");
CHECK_OK(read_reg_status);
- if (!read_reg_status.ok()) {
- LOG(ERROR) << "[DPI] Failed to read pc.";
- return 0;
- }
return static_cast<uint32_t>(read_reg_status.value());
}
RiscVTop* rv_top() { return rv_top_.get(); }
- RiscVState* rv_state() { return rv_state_.get(); }
+ KelvinV2State* rv_state() { return state_.get(); }
+
+ ElfProgramLoader* elf_loader() { return elf_loader_.get(); }
private:
- std::unique_ptr<RiscVState> CreateRVState(MemoryInterface* memory) {
- auto rv_state =
- std::make_unique<RiscVState>("RiscV32GV", RiscVXlen::RV32, memory);
+ std::unique_ptr<KelvinV2State> CreateState(MemoryInterface* memory) {
+ auto state =
+ std::make_unique<KelvinV2State>("KelvinV2", RiscVXlen::RV32, memory);
// Make sure the architectural and abi register aliases are added.
std::string reg_name;
for (int i = 0; i < 32; i++) {
reg_name = absl::StrCat(RiscVState::kXregPrefix, i);
- (void)rv_state->AddRegister<RV32Register>(reg_name);
- (void)rv_state->AddRegisterAlias<RV32Register>(reg_name,
- kXRegisterAliases[i]);
+ [[maybe_unused]] RV32Register* xreg =
+ state->AddRegister<RV32Register>(reg_name);
+ CHECK_OK(state->AddRegisterAlias<RV32Register>(reg_name,
+ kXRegisterAliases[i]));
+
+ reg_name = absl::StrCat(RiscVState::kFregPrefix, i);
+ [[maybe_unused]] RVFpRegister* freg =
+ state->AddRegister<RVFpRegister>(reg_name);
+ CHECK_OK(state->AddRegisterAlias<RVFpRegister>(reg_name,
+ kFRegisterAliases[i]));
}
- return rv_state;
+ return state;
}
- std::unique_ptr<RiscVFPState> CreateFPState(RiscVState* rv_state) {
- return std::make_unique<RiscVFPState>(rv_state->csr_set(), rv_state);
+ std::unique_ptr<RiscVFPState> CreateFPState(KelvinV2State* state) {
+ return std::make_unique<RiscVFPState>(state->csr_set(), state);
}
- std::unique_ptr<RiscVVectorState> CreateVectorState(RiscVState* rv_state) {
- return std::make_unique<RiscVVectorState>(rv_state,
- kKelvinVectorByteLength);
+ std::unique_ptr<RiscVVectorState> CreateVectorState(KelvinV2State* state) {
+ return std::make_unique<RiscVVectorState>(state, kKelvinVectorByteLength);
}
- std::unique_ptr<DecoderInterface> CreateDecoder(RiscVState* rv_state,
+ std::unique_ptr<DecoderInterface> CreateDecoder(KelvinV2State* state,
MemoryInterface* memory) {
- return std::make_unique<RiscV32GVecDecoder>(rv_state, memory);
+ return std::make_unique<KelvinV2UserDecoder>(state, memory);
}
- std::unique_ptr<RiscVTop> CreateRiscVTop(RiscVState* rv_state,
+ std::unique_ptr<RiscVTop> CreateRiscVTop(KelvinV2State* state,
DecoderInterface* decoder) {
- return std::make_unique<RiscVTop>("KelvinPlaceholder", rv_state, decoder);
+ return std::make_unique<RiscVTop>("KelvinPlaceholder", state, decoder);
}
const std::unique_ptr<MemoryInterface> memory_;
- const std::unique_ptr<RiscVState> rv_state_;
+ const std::unique_ptr<KelvinV2State> state_;
const std::unique_ptr<RiscVFPState> rv_fp_state_;
const std::unique_ptr<RiscVVectorState> rvv_state_;
const std::unique_ptr<DecoderInterface> rv_decoder_;
const std::unique_ptr<RiscVTop> rv_top_;
+ const std::unique_ptr<ElfProgramLoader> elf_loader_;
};
MpactHandle* g_mpact_handle = nullptr;
@@ -134,6 +155,20 @@
return 0;
}
+int mpact_load_program(const char* elf_file) {
+ if (elf_file == nullptr) {
+ LOG(ERROR) << "[DPI] mpact_init: received a null elf program.";
+ return -1;
+ }
+ absl::Status status = g_mpact_handle->load_program(elf_file);
+ if (!status.ok()) {
+ LOG(ERROR) << "[DPI] Failed to load elf program '" << elf_file
+ << "': " << status.message();
+ return -1;
+ }
+ return 0;
+}
+
int mpact_reset() {
if (g_mpact_handle != nullptr) {
mpact_fini();
@@ -171,46 +206,32 @@
return false;
}
-uint32_t mpact_get_pc() {
- if (g_mpact_handle == nullptr) {
- LOG(ERROR) << "[DPI] mpact_get_pc: g_mpact_handle is null.";
- return 0;
+int mpact_get_register(const char* name, uint32_t* value) {
+ if (value == nullptr) {
+ LOG(ERROR) << "[DPI] mpact_get_register: value is null.";
+ return -1;
}
- return g_mpact_handle->get_pc();
-}
-
-uint32_t mpact_get_gpr(uint32_t index) {
- if (g_mpact_handle == nullptr) {
- LOG(ERROR) << "[DPI] mpact_get_gpr: g_mpact_handle is null.";
- return 0;
+ if (name == nullptr) {
+ LOG(ERROR) << "[DPI] mpact_get_register: name is null.";
+ return -3;
}
- std::string reg_name =
- absl::StrCat(mpact::sim::riscv::RiscVState::kXregPrefix, index);
- mpact::sim::riscv::RiscVTop* rv_top = g_mpact_handle->rv_top();
+ *value = 0;
+ if (g_mpact_handle == nullptr) {
+ LOG(ERROR) << "[DPI] mpact_get_register: g_mpact_handle is null.";
+ return -2;
+ }
+ std::string reg_name(name);
+ RiscVTop* rv_top = g_mpact_handle->rv_top();
absl::StatusOr<uint64_t> read_reg_status = rv_top->ReadRegister(reg_name);
if (!read_reg_status.ok()) {
- LOG(ERROR) << "[DPI] mpact_get_gpr: Failed to read register: " << reg_name;
- return 0;
+ LOG(ERROR) << "[DPI] mpact_get_register: Failed to read register: "
+ << reg_name;
+ return -3;
}
- return static_cast<uint32_t>(read_reg_status.value());
-}
-
-uint32_t mpact_get_csr(uint32_t address) {
- if (g_mpact_handle == nullptr) {
- LOG(ERROR) << "[DPI] mpact_get_csr: g_mpact_handle is null.";
- return 0;
- }
- uint64_t csr_index = static_cast<uint64_t>(address);
-
- absl::StatusOr<mpact::sim::riscv::RiscVCsrInterface*> get_csr_status =
- g_mpact_handle->rv_state()->csr_set()->GetCsr(csr_index);
-
- if (!get_csr_status.ok()) {
- LOG(ERROR) << "[DPI] mpact_get_csr: Failed to get CSR: " << address;
- return 0;
- }
- mpact::sim::riscv::RiscVCsrInterface* csr = get_csr_status.value();
- return csr->AsUint32();
+  // Kelvin V2 is a 32-bit system. RiscVTop::ReadRegister returns 64-bit values
+  // on both 32-bit and 64-bit systems, so the cast to uint32_t is safe.
+ *value = static_cast<uint32_t>(*read_reg_status);
+ return 0;
}
int mpact_fini() {
diff --git a/sim/kelvin_v2.bin_fmt b/sim/kelvin_v2.bin_fmt
new file mode 100644
index 0000000..db59919
--- /dev/null
+++ b/sim/kelvin_v2.bin_fmt
@@ -0,0 +1,799 @@
+// Copyright 2025 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Defines the Kelvin V2 instructions for mpact-sim to generate binary decoders.
+decoder KelvinV2 {
+ namespace kelvin::sim::encoding;
+ opcode_enum = "::kelvin::sim::isa32_v2::OpcodeEnum";
+ includes {
+ #include "sim/kelvin_v2_decoder.h"
+ }
+ KelvinV2Inst32 = {
+ RiscVIInst32,
+ RiscVZicsrInst32,
+ RiscVZifenceiInst32,
+ RiscVMInst32,
+ RiscVFInst32,
+ RiscVZve32xInst32,
+ RiscVZbbInst32,
+ RiscVZbbInst32Only,
+ RiscVZbbImmInst32
+ };
+};
+
+// TODO: b/448154052 - Kelvin V2 sim should reuse mpact-riscv isa and bin_fmt
+// files.
+instruction group RiscVIInst32 = {RiscVIInstBase32, RiscVIHints32};
+
+
+
+
+format Inst32Format[32] {
+ fields:
+ unsigned bits[25];
+ unsigned opcode[7];
+};
+
+format RType[32] : Inst32Format {
+ fields:
+ unsigned func7[7];
+ unsigned rs2[5];
+ unsigned rs1[5];
+ unsigned func3[3];
+ unsigned rd[5];
+ unsigned opcode[7];
+ overlays:
+ unsigned r_uimm5[5] = rs2;
+};
+
+// Format for shift immediate for RV64; note the 6-bit immediate.
+format RSType[32] : Inst32Format {
+ fields:
+ unsigned func6[6];
+ unsigned r_uimm6[6];
+ unsigned rs1[5];
+ unsigned func3[3];
+ unsigned rd[5];
+ unsigned opcode[7];
+};
+
+format R4Type[32] : Inst32Format {
+ fields:
+ unsigned rs3[5];
+ unsigned func2[2];
+ unsigned rs2[5];
+ unsigned rs1[5];
+ unsigned func3[3];
+ unsigned rd[5];
+ unsigned opcode[7];
+};
+
+format IType[32] : Inst32Format {
+ fields:
+ signed imm12[12];
+ unsigned rs1[5];
+ unsigned func3[3];
+ unsigned rd[5];
+ unsigned opcode[7];
+ overlays:
+ unsigned u_imm12[12] = imm12;
+ unsigned i_uimm5[5] = rs1;
+};
+
+format SType[32] : Inst32Format {
+ fields:
+ unsigned imm7[7];
+ unsigned rs2[5];
+ unsigned rs1[5];
+ unsigned func3[3];
+ unsigned imm5[5];
+ unsigned opcode[7];
+ overlays:
+ signed s_imm[12] = imm7, imm5;
+};
+
+format BType[32] : Inst32Format {
+ fields:
+ unsigned imm7[7];
+ unsigned rs2[5];
+ unsigned rs1[5];
+ unsigned func3[3];
+ unsigned imm5[5];
+ unsigned opcode[7];
+ overlays:
+ signed b_imm[13] = imm7[6], imm5[0], imm7[5..0], imm5[4..1], 0b0;
+};
+
+format UType[32] : Inst32Format {
+ fields:
+ unsigned imm20[20];
+ unsigned rd[5];
+ unsigned opcode[7];
+ overlays:
+ unsigned u_imm[32] = imm20, 0b0000'0000'0000;
+};
+
+format JType[32] : Inst32Format {
+ fields:
+ unsigned imm20[20];
+ unsigned rd[5];
+ unsigned opcode[7];
+ overlays:
+ signed j_imm[21] = imm20[19, 7..0, 8, 18..9], 0b0;
+};
+
+format Fence[32] : Inst32Format {
+ fields:
+ unsigned fm[4];
+ unsigned pred[4];
+ unsigned succ[4];
+ unsigned rs1[5];
+ unsigned func3[3];
+ unsigned rd[5];
+ unsigned opcode[7];
+};
+
+format AType[32] : Inst32Format {
+ fields:
+ unsigned func5[5];
+ unsigned aq[1];
+ unsigned rl[1];
+ unsigned rs2[5];
+ unsigned rs1[5];
+ unsigned func3[3];
+ unsigned rd[5];
+ unsigned opcode[7];
+};
+
+format F12Type[32] : Inst32Format {
+ fields:
+ unsigned func12[12];
+ unsigned rs1[5];
+ unsigned func3[3];
+ unsigned rd[5];
+ unsigned opcode[7];
+};
+
+format ZICBOP[32] : Inst32Format {
+ fields:
+ unsigned offset[7];
+ unsigned func5[5];
+ unsigned rs1[5];
+ unsigned func3[3];
+ unsigned imm5[5];
+ unsigned op[7];
+ overlays:
+ unsigned bop_uimm12[12] = offset, 0b00000;
+}
+
+format MopRType[32] : Inst32Format {
+ fields:
+ unsigned func1[1];
+ unsigned n_hi[1];
+ unsigned func2[2];
+ unsigned n_mid[2];
+ unsigned func4[4];
+ unsigned n_lo[2];
+ unsigned rs1[5];
+ unsigned func3[3];
+ unsigned rd[5];
+ unsigned opcode[7];
+ overlays:
+ unsigned mop_no[5] = n_hi, n_mid, n_lo;
+};
+
+format MopRRType[32] : Inst32Format {
+ fields:
+ unsigned func1h[1];
+ unsigned n_hi[1];
+ unsigned func2[2];
+ unsigned n_lo[2];
+ unsigned func1l[1];
+ unsigned rs2[5];
+ unsigned rs1[5];
+ unsigned func3[3];
+ unsigned rd[5];
+ unsigned opcode[7];
+ overlays:
+ unsigned mop_no[3] = n_hi, n_lo;
+};
+
+// Vector instruction formats.
+
+format VMem[32] : Inst32Format {
+ fields:
+ unsigned nf[3];
+ unsigned mew[1];
+ unsigned mop[2];
+ unsigned vm[1];
+ unsigned rs2[5];
+ unsigned rs1[5];
+ unsigned width[3];
+ unsigned vd[5];
+ unsigned opcode[7];
+ overlays:
+ unsigned lumop[5] = rs2;
+ unsigned sumop[5] = rs2;
+ unsigned vs2[5] = rs2;
+ unsigned vs3[5] = vd;
+};
+
+format VArith[32] : Inst32Format {
+ fields:
+ unsigned func6[6];
+ unsigned vm[1];
+ unsigned vs2[5];
+ unsigned vs1[5];
+ unsigned func3[3];
+ unsigned vd[5];
+ unsigned opcode[7];
+ overlays:
+ unsigned uimm5[5] = vs1;
+ unsigned uimm6[6] = func6[0], vs1;
+ unsigned func5[5] = func6[5..1];
+ signed simm5[5] = vs1;
+ unsigned rd[5] = vd;
+ unsigned rs1[5] = vs1;
+ unsigned vd_mask[5] = vd;
+};
+
+format VConfig[32] : Inst32Format {
+ fields:
+ unsigned top12[12];
+ unsigned rs1[5];
+ unsigned func3[3];
+ unsigned rd[5];
+ unsigned opcode[7];
+ overlays:
+ signed zimm11[11] = top12[10..0];
+ unsigned func1[1] = top12[11];
+ unsigned func2[2] = top12[11..10];
+ unsigned func7[7] = top12[11..5];
+ signed zimm10[10] = top12[9..0];
+ unsigned uimm5[5] = rs1;
+ unsigned rs2[5] = top12[4..0];
+};
+
+instruction group RiscVIInstBase32[32] : Inst32Format {
+ // RV32I Base Instruction Set
+ lui : UType : opcode == 0b011'0111, rd != 0;
+ auipc : UType : opcode == 0b001'0111, rd != 0;
+ jal : JType : rd != 0, opcode == 0b110'1111;
+ j : JType : rd == 0, opcode == 0b110'1111;
+ jalr : IType : rd != 0, func3 == 0b000, opcode == 0b110'0111;
+ jr : IType : rd == 0, func3 == 0b000, opcode == 0b110'0111;
+ beq : BType : func3 == 0b000, opcode == 0b110'0011;
+ bne : BType : func3 == 0b001, opcode == 0b110'0011;
+ blt : BType : func3 == 0b100, opcode == 0b110'0011;
+ bge : BType : func3 == 0b101, opcode == 0b110'0011;
+ bltu : BType : func3 == 0b110, opcode == 0b110'0011;
+ bgeu : BType : func3 == 0b111, opcode == 0b110'0011;
+ lb : BType : func3 == 0b000, opcode == 0b000'0011;
+ lh : BType : func3 == 0b001, opcode == 0b000'0011;
+ lw : BType : func3 == 0b010, opcode == 0b000'0011;
+ lbu : BType : func3 == 0b100, opcode == 0b000'0011;
+ lhu : BType : func3 == 0b101, opcode == 0b000'0011;
+ sb : SType : func3 == 0b000, opcode == 0b010'0011;
+ sh : SType : func3 == 0b001, opcode == 0b010'0011;
+ sw : SType : func3 == 0b010, opcode == 0b010'0011;
+ addi : IType : func3 == 0b000, opcode == 0b001'0011, rd != 0;
+ nop : IType : func3 == 0b000, opcode == 0b001'0011, rd == 0, imm12 == 0, rs1 == 0;
+ slti : IType : func3 == 0b010, opcode == 0b001'0011, rd != 0;
+ sltiu : IType : func3 == 0b011, opcode == 0b001'0011, rd != 0;
+ xori : IType : func3 == 0b100, rd != 0, opcode == 0b001'0011;
+ ori : IType : func3 == 0b110, rd != 0b00000, opcode == 0b001'0011;
+ andi : IType : func3 == 0b111, opcode == 0b001'0011, rd != 0;
+ slli : RType : func7 == 0b000'0000, func3==0b001, opcode == 0b001'0011;
+ srli : RType : func7 == 0b000'0000, func3==0b101, rd != 0, opcode == 0b001'0011;
+ srai : RType : func7 == 0b010'0000, func3==0b101, opcode == 0b001'0011;
+ add : RType : func7 == 0b000'0000, func3==0b000, rd != 0, opcode == 0b011'0011;
+ sub : RType : func7 == 0b010'0000, func3==0b000, rd != 0, opcode == 0b011'0011;
+ sll : RType : func7 == 0b000'0000, func3==0b001, rd != 0, opcode == 0b011'0011;
+ slt : RType : func7 == 0b000'0000, func3==0b010, rd != 0, opcode == 0b011'0011;
+ sltu : RType : func7 == 0b000'0000, func3==0b011, rd != 0, opcode == 0b011'0011;
+ xor : RType : func7 == 0b000'0000, func3==0b100, rd != 0, opcode == 0b011'0011;
+ srl : RType : func7 == 0b000'0000, func3==0b101, rd != 0, opcode == 0b011'0011;
+ sra : RType : func7 == 0b010'0000, func3==0b101, rd != 0, opcode == 0b011'0011;
+ or : RType : func7 == 0b000'0000, func3==0b110, rd != 0, opcode == 0b011'0011;
+ and : RType : func7 == 0b000'0000, func3==0b111, rd != 0, opcode == 0b011'0011;
+ fence : Fence : fm == 0b0000, succ != 0b0000, func3 == 0b000, opcode == 0b000'1111;
+ fence_tso : Fence : fm == 0b1000, pred == 0b0011, succ == 0b0011, func3 == 0b000, opcode == 0b000'1111;
+ ecall : Inst32Format : bits == 0b0000'0000'0000'00000'000'00000, opcode == 0b111'0011;
+ ebreak : Inst32Format : bits == 0b0000'0000'0001'00000'000'00000, opcode == 0b111'0011;
+};
+
+instruction group RiscVZifenceiInst32[32] : Inst32Format {
+ // RV32/RV64 Zifencei Standard Extension
+  fencei    : IType  : func3 == 0b001, opcode == 0b000'1111;
+};
+
+instruction group RiscVZicsrInst32[32] : Inst32Format {
+ // RV32/RV64 Zicsr Standard Extension
+ csrrw : IType : func3 == 0b001, u_imm12 != 0b1100'0000'0000, rd != 0, opcode == 0b111'0011;
+ csrrs : IType : func3 == 0b010, rs1 != 0, rd != 0, opcode == 0b111'0011;
+ csrrc : IType : func3 == 0b011, rs1 != 0, rd != 0, opcode == 0b111'0011;
+ csrrs_nr : IType : func3 == 0b010, rs1 != 0, rd == 0, opcode == 0b111'0011;
+ csrrc_nr : IType : func3 == 0b011, rs1 != 0, rd == 0, opcode == 0b111'0011;
+ csrrw_nr : IType : func3 == 0b001, u_imm12 != 0b1100'0000'0000, rd == 0, opcode == 0b111'0011;
+ csrrs_nw : IType : func3 == 0b010, rs1 == 0, opcode == 0b111'0011;
+ csrrc_nw : IType : func3 == 0b011, rs1 == 0, opcode == 0b111'0011;
+ csrrwi : IType : func3 == 0b101, rd != 0, opcode == 0b111'0011;
+ csrrsi : IType : func3 == 0b110, rs1 != 0, rd != 0, opcode == 0b111'0011;
+ csrrci : IType : func3 == 0b111, rs1 != 0, rd != 0, opcode == 0b111'0011;
+ csrrsi_nr: IType : func3 == 0b110, rs1 != 0, rd == 0, opcode == 0b111'0011;
+ csrrci_nr: IType : func3 == 0b111, rs1 != 0, rd == 0, opcode == 0b111'0011;
+ csrrwi_nr: IType : func3 == 0b101, rd == 0, opcode == 0b111'0011;
+ csrrsi_nw: IType : func3 == 0b110, rs1 == 0, opcode == 0b111'0011;
+ csrrci_nw: IType : func3 == 0b111, rs1 == 0, opcode == 0b111'0011;
+ unimp : IType : func3 == 0b001, u_imm12 == 0b1100'0000'0000, rs1 == 0, rd == 0, opcode == 0b111'0011;
+};
+
+instruction group RiscVMInst32[32] : Inst32Format {
+ // RV32M Standard Extension
+ mul : RType : func7 == 0b000'0001, func3 == 0b000, opcode == 0b011'0011;
+ mulh : RType : func7 == 0b000'0001, func3 == 0b001, opcode == 0b011'0011;
+ mulhsu : RType : func7 == 0b000'0001, func3 == 0b010, opcode == 0b011'0011;
+ mulhu : RType : func7 == 0b000'0001, func3 == 0b011, opcode == 0b011'0011;
+ div : RType : func7 == 0b000'0001, func3 == 0b100, opcode == 0b011'0011;
+ divu : RType : func7 == 0b000'0001, func3 == 0b101, opcode == 0b011'0011;
+ rem : RType : func7 == 0b000'0001, func3 == 0b110, opcode == 0b011'0011;
+ remu : RType : func7 == 0b000'0001, func3 == 0b111, opcode == 0b011'0011;
+};
+
+instruction group RiscVFInst32[32] : Inst32Format {
+ // RV32F Standard Extension
+ flw : IType : func3 == 0b010, opcode == 0b000'0111;
+ fsw : SType : func3 == 0b010, opcode == 0b010'0111;
+ fmadd_s : R4Type : func2 == 0b00, opcode == 0b100'0011;
+ fmsub_s : R4Type : func2 == 0b00, opcode == 0b100'0111;
+ fnmsub_s : R4Type : func2 == 0b00, opcode == 0b100'1011;
+ fnmadd_s : R4Type : func2 == 0b00, opcode == 0b100'1111;
+ fadd_s : RType : func7 == 0b000'0000, opcode == 0b101'0011;
+ fsub_s : RType : func7 == 0b000'0100, opcode == 0b101'0011;
+ fmul_s : RType : func7 == 0b000'1000, opcode == 0b101'0011;
+ fdiv_s : RType : func7 == 0b000'1100, opcode == 0b101'0011;
+ fsqrt_s : RType : func7 == 0b010'1100, rs2 == 0, opcode == 0b101'0011;
+ fsgnj_s : RType : func7 == 0b001'0000, func3 == 0b000, opcode == 0b101'0011;
+ fsgnjn_s : RType : func7 == 0b001'0000, func3 == 0b001, opcode == 0b101'0011;
+ fsgnjx_s : RType : func7 == 0b001'0000, func3 == 0b010, opcode == 0b101'0011;
+ fmin_s : RType : func7 == 0b001'0100, func3 == 0b000, opcode == 0b101'0011;
+ fmax_s : RType : func7 == 0b001'0100, func3 == 0b001, opcode == 0b101'0011;
+ fcvt_ws : RType : func7 == 0b110'0000, rs2 == 0, opcode == 0b101'0011;
+ fcvt_wus : RType : func7 == 0b110'0000, rs2 == 1, opcode == 0b101'0011;
+ fmv_xw : RType : func7 == 0b111'0000, rs2 == 0, func3 == 0b000, opcode == 0b101'0011;
+ fcmpeq_s : RType : func7 == 0b101'0000, func3 == 0b010, opcode == 0b101'0011;
+ fcmplt_s : RType : func7 == 0b101'0000, func3 == 0b001, opcode == 0b101'0011;
+ fcmple_s : RType : func7 == 0b101'0000, func3 == 0b000, opcode == 0b101'0011;
+ fclass_s : RType : func7 == 0b111'0000, rs2 == 0, func3 == 0b001, opcode == 0b101'0011;
+ fcvt_sw : RType : func7 == 0b110'1000, rs2 == 0, opcode == 0b101'0011;
+ fcvt_swu : RType : func7 == 0b110'1000, rs2 == 1, opcode == 0b101'0011;
+ fmv_wx : RType : func7 == 0b111'1000, rs2 == 0, func3 == 0b000, opcode == 0b101'0011;
+};
+
+// Encoding for RiscV hint instructions.
+instruction group RiscVIHints32[32] : Inst32Format {
+ lui_hint: UType : opcode == 0b011'0111, rd == 0;
+ auipc_hint: UType : opcode == 0b001'0111, rd == 0;
+ addi_hint1: IType : func3 == 0b000, opcode == 0b001'0011, rd == 0, rs1 != 0;
+ addi_hint2: IType : func3 == 0b000, opcode == 0b001'0011, rd == 0, imm12 != 0;
+ slti_hint : IType : func3 == 0b010, opcode == 0b001'0011, rd == 0;
+ sltiu_hint: IType : func3 == 0b011, opcode == 0b001'0011, rd == 0;
+ xori_hint: IType : func3 == 0b100, rd == 0, opcode == 0b001'0011;
+ ori_hint: IType : func3 == 0b110, rd == 0b00000, opcode == 0b001'0011;
+ andi_hint: IType : func3 == 0b111, opcode == 0b001'0011, rd == 0;
+ srli_hint: RType : func7 == 0b000'0000, func3==0b101, rd == 0, opcode == 0b001'0011;
+ slt_hint: RType : func7 == 0b000'0000, func3==0b010, rd == 0, opcode == 0b011'0011;
+ slli_semihost: RType : func7 == 0b000'0000, func3==0b001, rd == 0, rs1 == 0, r_uimm5 == 31, opcode == 0b001'0011;
+ slli_hint1: RType : func7 == 0b000'0000, func3==0b001, rd == 0, rs1 != 0, opcode == 0b001'0011;
+ slli_hint2: RType : func7 == 0b000'0000, func3==0b001, rd == 0, r_uimm5 != 31, opcode == 0b001'0011;
+ srai_semihost: RType : func7 == 0b010'0000, func3==0b101, rd == 0, rs1 == 0, r_uimm5 == 7, opcode == 0b001'0011;
+ srai_hint1: RType : func7 == 0b010'0000, func3==0b101, rd == 0, rs1 != 0, opcode == 0b001'0011;
+ srai_hint2: RType : func7 == 0b010'0000, func3==0b101, rd == 0, r_uimm5 != 7, opcode == 0b001'0011;
+ add_hint1: RType : func7 == 0b000'0000, func3==0b000, rd == 0, rs1 != 0, opcode == 0b011'0011;
+ add_hint2: RType : func7 == 0b000'0000, func3==0b000, rd == 0, rs1 == 0, rs2 > 5, opcode == 0b011'0011;
+ add_hint3: RType : func7 == 0b000'0000, func3==0b000, rd == 0, rs1 == 0, rs2 < 2, opcode == 0b011'0011;
+ sub_hint: RType : func7 == 0b010'0000, func3==0b000, rd == 0, opcode == 0b011'0011;
+ sll_hint: RType : func7 == 0b000'0000, func3==0b001, rd == 0, opcode == 0b011'0011;
+ sltu_hint: RType : func7 == 0b000'0000, func3==0b011, rd == 0, opcode == 0b011'0011;
+ xor_hint: RType : func7 == 0b000'0000, func3==0b100, rd == 0, opcode == 0b011'0011;
+ srl_hint: RType : func7 == 0b000'0000, func3==0b101, rd == 0, opcode == 0b011'0011;
+ sra_hint: RType : func7 == 0b010'0000, func3==0b101, rd == 0, opcode == 0b011'0011;
+ or_hint : RType : func7 == 0b000'0000, func3==0b110, rd == 0, opcode == 0b011'0011;
+ and_hint: RType : func7 == 0b000'0000, func3==0b111, rd == 0, opcode == 0b011'0011;
+ fence_hint1: Fence : func3==0b000, rd == 0, rs1 != 0, fm == 0, pred == 0, opcode == 0b000'1111;
+ fence_hint2: Fence : func3==0b000, rd == 0, rs1 != 0, fm == 0, succ == 0, opcode == 0b000'1111;
+ fence_hint3: Fence : func3==0b000, rd != 0, rs1 == 0, fm == 0, pred == 0, opcode == 0b000'1111;
+ fence_hint4: Fence : func3==0b000, rd != 0, rs1 == 0, fm == 0, succ == 0, opcode == 0b000'1111;
+ fence_hint5: Fence : func3==0b000, rd == 0, rs1 == 0, fm == 0, pred == 0, succ != 0, opcode == 0b000'1111;
+ fence_hint6: Fence : func3==0b000, rd == 0, rs1 == 0, fm == 0, succ == 0, opcode == 0b000'1111;
+ fence_hint7: Fence : func3==0b000, rd != 0, rs1 == 0, fm == 0, pred != 1, succ == 0, opcode == 0b000'1111;
+};
+
+instruction group RiscVZve32xInst32[32] : Inst32Format {
+ //opcfg : VArith : func6 == 0bxxx'xxx, func3 == 0b111, opcode == 0b101'0111;
+ vsetvli_xn : VConfig : rs1 != 0, func1 == 0, func3 == 0b111, opcode == 0b101'0111;
+ vsetvli_nz : VConfig : rd != 0, rs1 == 0, func1 == 0, func3 == 0b111, opcode == 0b101'0111;
+ vsetvli_zz : VConfig : rd == 0, rs1 == 0, func1 == 0, func3 == 0b111, opcode == 0b101'0111;
+ vsetivli : VConfig : func2 == 0b11, func3 == 0b111, opcode == 0b101'0111;
+ vsetvl_xn : VConfig : rs1 != 0, func7 == 0b100'0000, func3 == 0b111, opcode == 0b101'0111;
+ vsetvl_nz : VConfig : rd != 0, rs1 == 0, func7 == 0b100'0000, func3 == 0b111, opcode == 0b101'0111;
+ vsetvl_zz : VConfig : rd == 0, rs1 == 0, func7 == 0b100'0000, func3 == 0b111, opcode == 0b101'0111;
+
+ // Unit stride, masked (vm=0).
+ vle8 : VMem : vm == 0, nf == 0, mew == 0, mop == 0b00, lumop == 0b00000, width == 0b000, opcode == 0b000'0111;
+ vle16 : VMem : vm == 0, nf == 0, mew == 0, mop == 0b00, lumop == 0b00000, width == 0b101, opcode == 0b000'0111;
+ vle32 : VMem : vm == 0, nf == 0, mew == 0, mop == 0b00, lumop == 0b00000, width == 0b110, opcode == 0b000'0111;
+ // Unit stride, unmasked (vm=1).
+ vle8_vm1 : VMem : vm == 1, nf == 0, mew == 0, mop == 0b00, lumop == 0b00000, width == 0b000, opcode == 0b000'0111;
+ vle16_vm1 : VMem : vm == 1, nf == 0, mew == 0, mop == 0b00, lumop == 0b00000, width == 0b101, opcode == 0b000'0111;
+ vle32_vm1 : VMem : vm == 1, nf == 0, mew == 0, mop == 0b00, lumop == 0b00000, width == 0b110, opcode == 0b000'0111;
+ // Mask load.
+ vlm : VMem : nf == 0, mew == 0, mop == 0b00, lumop == 0b01011, width == 0b000, opcode == 0b000'0111;
+ // Unit stride, fault first.
+ vle8ff : VMem : nf == 0, mew == 0, mop == 0b00, lumop == 0b10000, width == 0b000, opcode == 0b000'0111;
+ vle16ff : VMem : nf == 0, mew == 0, mop == 0b00, lumop == 0b10000, width == 0b101, opcode == 0b000'0111;
+ vle32ff : VMem : nf == 0, mew == 0, mop == 0b00, lumop == 0b10000, width == 0b110, opcode == 0b000'0111;
+ // Unit stride, whole register load.
+ vl1re8 : VMem : nf == 0, mop == 0b00, vm == 1, lumop == 0b01000, width == 0b000, opcode == 0b000'0111;
+ vl1re16 : VMem : nf == 0, mop == 0b00, vm == 1, lumop == 0b01000, width == 0b101, opcode == 0b000'0111;
+ vl1re32 : VMem : nf == 0, mop == 0b00, vm == 1, lumop == 0b01000, width == 0b110, opcode == 0b000'0111;
+ vl2re8 : VMem : nf == 1, mop == 0b00, vm == 1, lumop == 0b01000, width == 0b000, opcode == 0b000'0111;
+ vl2re16 : VMem : nf == 1, mop == 0b00, vm == 1, lumop == 0b01000, width == 0b101, opcode == 0b000'0111;
+ vl2re32 : VMem : nf == 1, mop == 0b00, vm == 1, lumop == 0b01000, width == 0b110, opcode == 0b000'0111;
+ vl4re8 : VMem : nf == 3, mop == 0b00, vm == 1, lumop == 0b01000, width == 0b000, opcode == 0b000'0111;
+ vl4re16 : VMem : nf == 3, mop == 0b00, vm == 1, lumop == 0b01000, width == 0b101, opcode == 0b000'0111;
+ vl4re32 : VMem : nf == 3, mop == 0b00, vm == 1, lumop == 0b01000, width == 0b110, opcode == 0b000'0111;
+ vl8re8 : VMem : nf == 7, mop == 0b00, vm == 1, lumop == 0b01000, width == 0b000, opcode == 0b000'0111;
+ vl8re16 : VMem : nf == 7, mop == 0b00, vm == 1, lumop == 0b01000, width == 0b101, opcode == 0b000'0111;
+ vl8re32 : VMem : nf == 7, mop == 0b00, vm == 1, lumop == 0b01000, width == 0b110, opcode == 0b000'0111;
+ // Vector load strided.
+ vlse8 : VMem : nf == 0, mew == 0, mop == 0b10, width == 0b000, opcode == 0b000'0111;
+ vlse16 : VMem : nf == 0, mew == 0, mop == 0b10, width == 0b101, opcode == 0b000'0111;
+ vlse32 : VMem : nf == 0, mew == 0, mop == 0b10, width == 0b110, opcode == 0b000'0111;
+ // Vector load indexed, unordered.
+ vluxei8 : VMem : nf == 0, mew == 0, mop == 0b01, width == 0b000, opcode == 0b000'0111;
+ vluxei16: VMem : nf == 0, mew == 0, mop == 0b01, width == 0b101, opcode == 0b000'0111;
+ vluxei32: VMem : nf == 0, mew == 0, mop == 0b01, width == 0b110, opcode == 0b000'0111;
+ // Vector load indexed, ordered.
+ vloxei8 : VMem : nf == 0, mew == 0, mop == 0b11, width == 0b000, opcode == 0b000'0111;
+ vloxei16: VMem : nf == 0, mew == 0, mop == 0b11, width == 0b101, opcode == 0b000'0111;
+ vloxei32: VMem : nf == 0, mew == 0, mop == 0b11, width == 0b110, opcode == 0b000'0111;
+ // Vector segment load, unit stride.
+ vlsege8: VMem : nf != 0, mew == 0, mop == 0b00, lumop == 0b00000, width == 0b000, opcode == 0b000'0111;
+ vlsege16: VMem : nf != 0, mew == 0, mop == 0b00, lumop == 0b00000, width == 0b101, opcode == 0b000'0111;
+ vlsege32: VMem : nf != 0, mew == 0, mop == 0b00, lumop == 0b00000, width == 0b110, opcode == 0b000'0111;
+ // Vector segment load, strided.
+ vlssege8: VMem : nf != 0, mew == 0, mop == 0b10, width == 0b000, opcode == 0b000'0111;
+ vlssege16: VMem : nf != 0, mew == 0, mop == 0b10, width == 0b101, opcode == 0b000'0111;
+ vlssege32: VMem : nf != 0, mew == 0, mop == 0b10, width == 0b110, opcode == 0b000'0111;
+ // Vector segment load, indexed, unordered.
+ vluxsegei8: VMem : nf != 0, mew == 0, mop == 0b01, width == 0b000, opcode == 0b000'0111;
+ vluxsegei16: VMem : nf != 0, mew == 0, mop == 0b01, width == 0b101, opcode == 0b000'0111;
+ vluxsegei32: VMem : nf != 0, mew == 0, mop == 0b01, width == 0b110, opcode == 0b000'0111;
+  // Vector segment load, indexed, ordered.
+ vloxsegei8: VMem : nf != 0, mew == 0, mop == 0b11, width == 0b000, opcode == 0b000'0111;
+ vloxsegei16: VMem : nf != 0, mew == 0, mop == 0b11, width == 0b101, opcode == 0b000'0111;
+ vloxsegei32: VMem : nf != 0, mew == 0, mop == 0b11, width == 0b110, opcode == 0b000'0111;
+
+
+ // VECTOR STORES
+
+ // Unit stride.
+ vse8 : VMem : nf == 0, mew == 0, mop == 0b00, sumop == 0b00000, width == 0b000, opcode == 0b010'0111;
+ vse16 : VMem : nf == 0, mew == 0, mop == 0b00, sumop == 0b00000, width == 0b101, opcode == 0b010'0111;
+ vse32 : VMem : nf == 0, mew == 0, mop == 0b00, sumop == 0b00000, width == 0b110, opcode == 0b010'0111;
+ // Mask store.
+ vsm : VMem : nf == 0, mew == 0, mop == 0b00, sumop == 0b01011, width == 0b000, opcode == 0b010'0111;
+ // Unit stride, fault first.
+ vse8ff : VMem : nf == 0, mew == 0, mop == 0b00, sumop == 0b10000, width == 0b000, opcode == 0b010'0111;
+ vse16ff : VMem : nf == 0, mew == 0, mop == 0b00, sumop == 0b10000, width == 0b101, opcode == 0b010'0111;
+ vse32ff : VMem : nf == 0, mew == 0, mop == 0b00, sumop == 0b10000, width == 0b110, opcode == 0b010'0111;
+ // Unit stride, whole register store.
+ vs1re8 : VMem : nf == 0, mop == 0b00, vm == 1, sumop == 0b01000, width == 0b000, opcode == 0b010'0111;
+ vs1re16 : VMem : nf == 0, mop == 0b00, vm == 1, sumop == 0b01000, width == 0b101, opcode == 0b010'0111;
+ vs1re32 : VMem : nf == 0, mop == 0b00, vm == 1, sumop == 0b01000, width == 0b110, opcode == 0b010'0111;
+ vs2re8 : VMem : nf == 1, mop == 0b00, vm == 1, sumop == 0b01000, width == 0b000, opcode == 0b010'0111;
+ vs2re16 : VMem : nf == 1, mop == 0b00, vm == 1, sumop == 0b01000, width == 0b101, opcode == 0b010'0111;
+ vs2re32 : VMem : nf == 1, mop == 0b00, vm == 1, sumop == 0b01000, width == 0b110, opcode == 0b010'0111;
+ vs4re8 : VMem : nf == 3, mop == 0b00, vm == 1, sumop == 0b01000, width == 0b000, opcode == 0b010'0111;
+ vs4re16 : VMem : nf == 3, mop == 0b00, vm == 1, sumop == 0b01000, width == 0b101, opcode == 0b010'0111;
+ vs4re32 : VMem : nf == 3, mop == 0b00, vm == 1, sumop == 0b01000, width == 0b110, opcode == 0b010'0111;
+ vs8re8 : VMem : nf == 7, mop == 0b00, vm == 1, sumop == 0b01000, width == 0b000, opcode == 0b010'0111;
+ vs8re16 : VMem : nf == 7, mop == 0b00, vm == 1, sumop == 0b01000, width == 0b101, opcode == 0b010'0111;
+ vs8re32 : VMem : nf == 7, mop == 0b00, vm == 1, sumop == 0b01000, width == 0b110, opcode == 0b010'0111;
+ // Store strided.
+ vsse8 : VMem : nf == 0, mew == 0, mop == 0b10, width == 0b000, opcode == 0b010'0111;
+ vsse16 : VMem : nf == 0, mew == 0, mop == 0b10, width == 0b101, opcode == 0b010'0111;
+ vsse32 : VMem : nf == 0, mew == 0, mop == 0b10, width == 0b110, opcode == 0b010'0111;
+ // Store indexed, unordered.
+ vsuxei8 : VMem : nf == 0, mew == 0, mop == 0b01, width == 0b000, opcode == 0b010'0111;
+ vsuxei16: VMem : nf == 0, mew == 0, mop == 0b01, width == 0b101, opcode == 0b010'0111;
+ vsuxei32: VMem : nf == 0, mew == 0, mop == 0b01, width == 0b110, opcode == 0b010'0111;
+ // Store indexed, ordered.
+ vsoxei8 : VMem : nf == 0, mew == 0, mop == 0b11, width == 0b000, opcode == 0b010'0111;
+ vsoxei16: VMem : nf == 0, mew == 0, mop == 0b11, width == 0b101, opcode == 0b010'0111;
+ vsoxei32: VMem : nf == 0, mew == 0, mop == 0b11, width == 0b110, opcode == 0b010'0111;
+ // Vector segment store, unit stride.
+ vssege8: VMem : nf != 0, mew == 0, mop == 0b00, sumop == 0b00000, width == 0b000, opcode == 0b010'0111;
+ vssege16: VMem : nf != 0, mew == 0, mop == 0b00, sumop == 0b00000, width == 0b101, opcode == 0b010'0111;
+ vssege32: VMem : nf != 0, mew == 0, mop == 0b00, sumop == 0b00000, width == 0b110, opcode == 0b010'0111;
+ // Vector segment store, strided.
+ vsssege8: VMem : nf != 0, mew == 0, mop == 0b10, width == 0b000, opcode == 0b010'0111;
+ vsssege16: VMem : nf != 0, mew == 0, mop == 0b10, width == 0b101, opcode == 0b010'0111;
+ vsssege32: VMem : nf != 0, mew == 0, mop == 0b10, width == 0b110, opcode == 0b010'0111;
+ // Vector segment store, indexed, unordered.
+ vsuxsegei8: VMem : nf != 0, mew == 0, mop == 0b01, width == 0b000, opcode == 0b010'0111;
+ vsuxsegei16: VMem : nf != 0, mew == 0, mop == 0b01, width == 0b101, opcode == 0b010'0111;
+ vsuxsegei32: VMem : nf != 0, mew == 0, mop == 0b01, width == 0b110, opcode == 0b010'0111;
+  // Vector segment store, indexed, ordered.
+ vsoxsegei8: VMem : nf != 0, mew == 0, mop == 0b11, width == 0b000, opcode == 0b010'0111;
+ vsoxsegei16: VMem : nf != 0, mew == 0, mop == 0b11, width == 0b101, opcode == 0b010'0111;
+ vsoxsegei32: VMem : nf != 0, mew == 0, mop == 0b11, width == 0b110, opcode == 0b010'0111;
+
+ // Integer: OPIVV, OPIVX, OPIVI
+ //opivv : VArith : func6 == 0bxxx'xxx, func3 == 0b000, opcode == 0b101'0111;
+ //opivx : VArith : func6 == 0bxxx'xxx, func3 == 0b100, opcode == 0b101'0111;
+ //opivi : VArith : func6 == 0bxxx'xxx, func3 == 0b011, opcode == 0b101'0111;
+
+ vadd_vv : VArith : func6 == 0b000'000, func3 == 0b000, opcode == 0b101'0111;
+ vadd_vx : VArith : func6 == 0b000'000, func3 == 0b100, opcode == 0b101'0111;
+ vadd_vi : VArith : func6 == 0b000'000, func3 == 0b011, opcode == 0b101'0111;
+ vsub_vv : VArith : func6 == 0b000'010, func3 == 0b000, opcode == 0b101'0111;
+ vsub_vx : VArith : func6 == 0b000'010, func3 == 0b100, opcode == 0b101'0111;
+ vrsub_vx : VArith : func6 == 0b000'011, func3 == 0b100, opcode == 0b101'0111;
+ vrsub_vi : VArith : func6 == 0b000'011, func3 == 0b011, opcode == 0b101'0111;
+ vminu_vv : VArith : func6 == 0b000'100, func3 == 0b000, opcode == 0b101'0111;
+ vminu_vx : VArith : func6 == 0b000'100, func3 == 0b100, opcode == 0b101'0111;
+ vmin_vv : VArith : func6 == 0b000'101, func3 == 0b000, opcode == 0b101'0111;
+ vmin_vx : VArith : func6 == 0b000'101, func3 == 0b100, opcode == 0b101'0111;
+ vmaxu_vv : VArith : func6 == 0b000'110, func3 == 0b000, opcode == 0b101'0111;
+ vmaxu_vx : VArith : func6 == 0b000'110, func3 == 0b100, opcode == 0b101'0111;
+ vmax_vv : VArith : func6 == 0b000'111, func3 == 0b000, opcode == 0b101'0111;
+ vmax_vx : VArith : func6 == 0b000'111, func3 == 0b100, opcode == 0b101'0111;
+ vand_vv : VArith : func6 == 0b001'001, func3 == 0b000, opcode == 0b101'0111;
+ vand_vx : VArith : func6 == 0b001'001, func3 == 0b100, opcode == 0b101'0111;
+ vand_vi : VArith : func6 == 0b001'001, func3 == 0b011, opcode == 0b101'0111;
+ vor_vv : VArith : func6 == 0b001'010, func3 == 0b000, opcode == 0b101'0111;
+ vor_vx : VArith : func6 == 0b001'010, func3 == 0b100, opcode == 0b101'0111;
+ vor_vi : VArith : func6 == 0b001'010, func3 == 0b011, opcode == 0b101'0111;
+ vxor_vv : VArith : func6 == 0b001'011, func3 == 0b000, opcode == 0b101'0111;
+ vxor_vx : VArith : func6 == 0b001'011, func3 == 0b100, opcode == 0b101'0111;
+ vxor_vi : VArith : func6 == 0b001'011, func3 == 0b011, opcode == 0b101'0111;
+ vrgather_vv : VArith : func6 == 0b001'100, func3 == 0b000, opcode == 0b101'0111;
+ vrgather_vx : VArith : func6 == 0b001'100, func3 == 0b100, opcode == 0b101'0111;
+ vrgather_vi : VArith : func6 == 0b001'100, func3 == 0b011, opcode == 0b101'0111;
+ vslideup_vx : VArith : func6 == 0b001'110, func3 == 0b100, opcode == 0b101'0111;
+ vslideup_vi : VArith : func6 == 0b001'110, func3 == 0b011, opcode == 0b101'0111;
+ vrgatherei16_vv : VArith : func6 == 0b001'110, func3 == 0b000, opcode == 0b101'0111;
+ vslidedown_vx : VArith : func6 == 0b001'111, func3 == 0b100, opcode == 0b101'0111;
+ vslidedown_vi : VArith : func6 == 0b001'111, func3 == 0b011, opcode == 0b101'0111;
+ vadc_vv : VArith : func6 == 0b010'000, vd != 0, vm == 0, func3 == 0b000, opcode == 0b101'0111;
+ vadc_vx : VArith : func6 == 0b010'000, vd != 0, vm == 0, func3 == 0b100, opcode == 0b101'0111;
+ vadc_vi : VArith : func6 == 0b010'000, vd != 0, vm == 0, func3 == 0b011, opcode == 0b101'0111;
+ vmadc_vv : VArith : func6 == 0b010'001, func3 == 0b000, opcode == 0b101'0111;
+ vmadc_vx : VArith : func6 == 0b010'001, func3 == 0b100, opcode == 0b101'0111;
+ vmadc_vi : VArith : func6 == 0b010'001, func3 == 0b011, opcode == 0b101'0111;
+ vsbc_vv : VArith : func6 == 0b010'010, vd != 0, vm == 0, func3 == 0b000, opcode == 0b101'0111;
+ vsbc_vx : VArith : func6 == 0b010'010, vd != 0, vm == 0, func3 == 0b100, opcode == 0b101'0111;
+ vmsbc_vv : VArith : func6 == 0b010'011, func3 == 0b000, opcode == 0b101'0111;
+ vmsbc_vx : VArith : func6 == 0b010'011, func3 == 0b100, opcode == 0b101'0111;
+ vmerge_vv : VArith : func6 == 0b010'111, vm == 0, func3 == 0b000, opcode == 0b101'0111;
+ vmerge_vx : VArith : func6 == 0b010'111, vm == 0, func3 == 0b100, opcode == 0b101'0111;
+ vmerge_vi : VArith : func6 == 0b010'111, vm == 0, func3 == 0b011, opcode == 0b101'0111;
+ vmv_vv : VArith : func6 == 0b010'111, vm == 1, vs2 == 0, func3 == 0b000, opcode == 0b101'0111;
+ vmv_vx : VArith : func6 == 0b010'111, vm == 1, vs2 == 0, func3 == 0b100, opcode == 0b101'0111;
+ vmv_vi : VArith : func6 == 0b010'111, vm == 1, vs2 == 0, func3 == 0b011, opcode == 0b101'0111;
+ vmseq_vv : VArith : func6 == 0b011'000, func3 == 0b000, opcode == 0b101'0111;
+ vmseq_vx : VArith : func6 == 0b011'000, func3 == 0b100, opcode == 0b101'0111;
+ vmseq_vi : VArith : func6 == 0b011'000, func3 == 0b011, opcode == 0b101'0111;
+ vmsne_vv : VArith : func6 == 0b011'001, func3 == 0b000, opcode == 0b101'0111;
+ vmsne_vx : VArith : func6 == 0b011'001, func3 == 0b100, opcode == 0b101'0111;
+ vmsne_vi : VArith : func6 == 0b011'001, func3 == 0b011, opcode == 0b101'0111;
+ vmsltu_vv : VArith : func6 == 0b011'010, func3 == 0b000, opcode == 0b101'0111;
+ vmsltu_vx : VArith : func6 == 0b011'010, func3 == 0b100, opcode == 0b101'0111;
+ vmslt_vv : VArith : func6 == 0b011'011, func3 == 0b000, opcode == 0b101'0111;
+ vmslt_vx : VArith : func6 == 0b011'011, func3 == 0b100, opcode == 0b101'0111;
+ vmsleu_vv : VArith : func6 == 0b011'100, func3 == 0b000, opcode == 0b101'0111;
+ vmsleu_vx : VArith : func6 == 0b011'100, func3 == 0b100, opcode == 0b101'0111;
+ vmsleu_vi : VArith : func6 == 0b011'100, func3 == 0b011, opcode == 0b101'0111;
+ vmsle_vv : VArith : func6 == 0b011'101, func3 == 0b000, opcode == 0b101'0111;
+ vmsle_vx : VArith : func6 == 0b011'101, func3 == 0b100, opcode == 0b101'0111;
+ vmsle_vi : VArith : func6 == 0b011'101, func3 == 0b011, opcode == 0b101'0111;
+ vmsgtu_vx : VArith : func6 == 0b011'110, func3 == 0b100, opcode == 0b101'0111;
+ vmsgtu_vi : VArith : func6 == 0b011'110, func3 == 0b011, opcode == 0b101'0111;
+ vmsgt_vx : VArith : func6 == 0b011'111, func3 == 0b100, opcode == 0b101'0111;
+ vmsgt_vi : VArith : func6 == 0b011'111, func3 == 0b011, opcode == 0b101'0111;
+ vsaddu_vv : VArith : func6 == 0b100'000, func3 == 0b000, opcode == 0b101'0111;
+ vsaddu_vx : VArith : func6 == 0b100'000, func3 == 0b100, opcode == 0b101'0111;
+ vsaddu_vi : VArith : func6 == 0b100'000, func3 == 0b011, opcode == 0b101'0111;
+ vsadd_vv : VArith : func6 == 0b100'001, func3 == 0b000, opcode == 0b101'0111;
+ vsadd_vx : VArith : func6 == 0b100'001, func3 == 0b100, opcode == 0b101'0111;
+ vsadd_vi : VArith : func6 == 0b100'001, func3 == 0b011, opcode == 0b101'0111;
+ vssubu_vv : VArith : func6 == 0b100'010, func3 == 0b000, opcode == 0b101'0111;
+ vssubu_vx : VArith : func6 == 0b100'010, func3 == 0b100, opcode == 0b101'0111;
+ vssub_vv : VArith : func6 == 0b100'011, func3 == 0b000, opcode == 0b101'0111;
+ vssub_vx : VArith : func6 == 0b100'011, func3 == 0b100, opcode == 0b101'0111;
+ vsll_vv : VArith : func6 == 0b100'101, func3 == 0b000, opcode == 0b101'0111;
+ vsll_vx : VArith : func6 == 0b100'101, func3 == 0b100, opcode == 0b101'0111;
+ vsll_vi : VArith : func6 == 0b100'101, func3 == 0b011, opcode == 0b101'0111;
+ vsmul_vv : VArith : func6 == 0b100'111, func3 == 0b000, opcode == 0b101'0111;
+ vsmul_vx : VArith : func6 == 0b100'111, func3 == 0b100, opcode == 0b101'0111;
+ vmv1r_vi : VArith : func6 == 0b100'111, uimm5 == 0, func3 == 0b011, opcode == 0b101'0111;
+ vmv2r_vi : VArith : func6 == 0b100'111, uimm5 == 1, func3 == 0b011, opcode == 0b101'0111;
+ vmv4r_vi : VArith : func6 == 0b100'111, uimm5 == 3, func3 == 0b011, opcode == 0b101'0111;
+ vmv8r_vi : VArith : func6 == 0b100'111, uimm5 == 7, func3 == 0b011, opcode == 0b101'0111;
+ vsrl_vv : VArith : func6 == 0b101'000, func3 == 0b000, opcode == 0b101'0111;
+ vsrl_vx : VArith : func6 == 0b101'000, func3 == 0b100, opcode == 0b101'0111;
+ vsrl_vi : VArith : func6 == 0b101'000, func3 == 0b011, opcode == 0b101'0111;
+ vsra_vv : VArith : func6 == 0b101'001, func3 == 0b000, opcode == 0b101'0111;
+ vsra_vx : VArith : func6 == 0b101'001, func3 == 0b100, opcode == 0b101'0111;
+ vsra_vi : VArith : func6 == 0b101'001, func3 == 0b011, opcode == 0b101'0111;
+ vssrl_vv : VArith : func6 == 0b101'010, func3 == 0b000, opcode == 0b101'0111;
+ vssrl_vx : VArith : func6 == 0b101'010, func3 == 0b100, opcode == 0b101'0111;
+ vssrl_vi : VArith : func6 == 0b101'010, func3 == 0b011, opcode == 0b101'0111;
+ vssra_vv : VArith : func6 == 0b101'011, func3 == 0b000, opcode == 0b101'0111;
+ vssra_vx : VArith : func6 == 0b101'011, func3 == 0b100, opcode == 0b101'0111;
+ vssra_vi : VArith : func6 == 0b101'011, func3 == 0b011, opcode == 0b101'0111;
+ vnsrl_vv : VArith : func6 == 0b101'100, func3 == 0b000, opcode == 0b101'0111;
+ vnsrl_vx : VArith : func6 == 0b101'100, func3 == 0b100, opcode == 0b101'0111;
+ vnsrl_vi : VArith : func6 == 0b101'100, func3 == 0b011, opcode == 0b101'0111;
+ vnsra_vv : VArith : func6 == 0b101'101, func3 == 0b000, opcode == 0b101'0111;
+ vnsra_vx : VArith : func6 == 0b101'101, func3 == 0b100, opcode == 0b101'0111;
+ vnsra_vi : VArith : func6 == 0b101'101, func3 == 0b011, opcode == 0b101'0111;
+ vnclipu_vv : VArith : func6 == 0b101'110, func3 == 0b000, opcode == 0b101'0111;
+ vnclipu_vx : VArith : func6 == 0b101'110, func3 == 0b100, opcode == 0b101'0111;
+ vnclipu_vi : VArith : func6 == 0b101'110, func3 == 0b011, opcode == 0b101'0111;
+ vnclip_vv : VArith : func6 == 0b101'111, func3 == 0b000, opcode == 0b101'0111;
+ vnclip_vx : VArith : func6 == 0b101'111, func3 == 0b100, opcode == 0b101'0111;
+ vnclip_vi : VArith : func6 == 0b101'111, func3 == 0b011, opcode == 0b101'0111;
+ vwredsumu_vv : VArith : func6 == 0b110'000, func3 == 0b000, opcode == 0b101'0111;
+ vwredsum_vv : VArith : func6 == 0b110'001, func3 == 0b000, opcode == 0b101'0111;
+
+ // Integer: OPMVV, OPMVX
+ //opmvv : VArith : func6 == 0bxxx'xxx, func3 == 0b010, opcode == 0b101'0111;
+ //opmvx : VArith : func6 == 0bxxx'xxx, func3 == 0b110, opcode == 0b101'0111;
+
+ vredsum_vv : VArith : func6 == 0b000'000, func3 == 0b010, opcode == 0b101'0111;
+ vredand_vv : VArith : func6 == 0b000'001, func3 == 0b010, opcode == 0b101'0111;
+ vredor_vv : VArith : func6 == 0b000'010, func3 == 0b010, opcode == 0b101'0111;
+ vredxor_vv : VArith : func6 == 0b000'011, func3 == 0b010, opcode == 0b101'0111;
+ vredminu_vv : VArith : func6 == 0b000'100, func3 == 0b010, opcode == 0b101'0111;
+ vredmin_vv : VArith : func6 == 0b000'101, func3 == 0b010, opcode == 0b101'0111;
+ vredmaxu_vv : VArith : func6 == 0b000'110, func3 == 0b010, opcode == 0b101'0111;
+ vredmax_vv : VArith : func6 == 0b000'111, func3 == 0b010, opcode == 0b101'0111;
+ vaaddu_vv : VArith : func6 == 0b001'000, func3 == 0b010, opcode == 0b101'0111;
+ vaaddu_vx : VArith : func6 == 0b001'000, func3 == 0b110, opcode == 0b101'0111;
+ vaadd_vv : VArith : func6 == 0b001'001, func3 == 0b010, opcode == 0b101'0111;
+ vaadd_vx : VArith : func6 == 0b001'001, func3 == 0b110, opcode == 0b101'0111;
+ vasubu_vv : VArith : func6 == 0b001'010, func3 == 0b010, opcode == 0b101'0111;
+ vasubu_vx : VArith : func6 == 0b001'010, func3 == 0b110, opcode == 0b101'0111;
+ vasub_vv : VArith : func6 == 0b001'011, func3 == 0b010, opcode == 0b101'0111;
+ vasub_vx : VArith : func6 == 0b001'011, func3 == 0b110, opcode == 0b101'0111;
+ vslide1up_vx : VArith : func6 == 0b001'110, func3 == 0b110, opcode == 0b101'0111;
+ vslide1down_vx : VArith : func6 == 0b001'111, func3 == 0b110, opcode == 0b101'0111;
+ vcompress_vv : VArith : func6 == 0b010'111, func3 == 0b010, opcode == 0b101'0111;
+ vmandnot_vv : VArith : func6 == 0b011'000, func3 == 0b010, opcode == 0b101'0111;
+ vmand_vv : VArith : func6 == 0b011'001, func3 == 0b010, opcode == 0b101'0111;
+ vmor_vv : VArith : func6 == 0b011'010, func3 == 0b010, opcode == 0b101'0111;
+ vmxor_vv : VArith : func6 == 0b011'011, func3 == 0b010, opcode == 0b101'0111;
+ vmornot_vv : VArith : func6 == 0b011'100, func3 == 0b010, opcode == 0b101'0111;
+ vmnand_vv : VArith : func6 == 0b011'101, func3 == 0b010, opcode == 0b101'0111;
+ vmnor_vv : VArith : func6 == 0b011'110, func3 == 0b010, opcode == 0b101'0111;
+ vmxnor_vv : VArith : func6 == 0b011'111, func3 == 0b010, opcode == 0b101'0111;
+
+ vdivu_vv : VArith : func6 == 0b100'000, func3 == 0b010, opcode == 0b101'0111;
+ vdivu_vx : VArith : func6 == 0b100'000, func3 == 0b110, opcode == 0b101'0111;
+ vdiv_vv : VArith : func6 == 0b100'001, func3 == 0b010, opcode == 0b101'0111;
+ vdiv_vx : VArith : func6 == 0b100'001, func3 == 0b110, opcode == 0b101'0111;
+ vremu_vv : VArith : func6 == 0b100'010, func3 == 0b010, opcode == 0b101'0111;
+ vremu_vx : VArith : func6 == 0b100'010, func3 == 0b110, opcode == 0b101'0111;
+ vrem_vv : VArith : func6 == 0b100'011, func3 == 0b010, opcode == 0b101'0111;
+ vrem_vx : VArith : func6 == 0b100'011, func3 == 0b110, opcode == 0b101'0111;
+ vmulhu_vv : VArith : func6 == 0b100'100, func3 == 0b010, opcode == 0b101'0111;
+ vmulhu_vx : VArith : func6 == 0b100'100, func3 == 0b110, opcode == 0b101'0111;
+ vmul_vv : VArith : func6 == 0b100'101, func3 == 0b010, opcode == 0b101'0111;
+ vmul_vx : VArith : func6 == 0b100'101, func3 == 0b110, opcode == 0b101'0111;
+ vmulhsu_vv : VArith : func6 == 0b100'110, func3 == 0b010, opcode == 0b101'0111;
+ vmulhsu_vx : VArith : func6 == 0b100'110, func3 == 0b110, opcode == 0b101'0111;
+ vmulh_vv : VArith : func6 == 0b100'111, func3 == 0b010, opcode == 0b101'0111;
+ vmulh_vx : VArith : func6 == 0b100'111, func3 == 0b110, opcode == 0b101'0111;
+ vmadd_vv : VArith : func6 == 0b101'001, func3 == 0b010, opcode == 0b101'0111;
+ vmadd_vx : VArith : func6 == 0b101'001, func3 == 0b110, opcode == 0b101'0111;
+ vnmsub_vv : VArith : func6 == 0b101'011, func3 == 0b010, opcode == 0b101'0111;
+ vnmsub_vx : VArith : func6 == 0b101'011, func3 == 0b110, opcode == 0b101'0111;
+ vmacc_vv : VArith : func6 == 0b101'101, func3 == 0b010, opcode == 0b101'0111;
+ vmacc_vx : VArith : func6 == 0b101'101, func3 == 0b110, opcode == 0b101'0111;
+ vnmsac_vv : VArith : func6 == 0b101'111, func3 == 0b010, opcode == 0b101'0111;
+ vnmsac_vx : VArith : func6 == 0b101'111, func3 == 0b110, opcode == 0b101'0111;
+ vwaddu_vv : VArith : func6 == 0b110'000, func3 == 0b010, opcode == 0b101'0111;
+ vwaddu_vx : VArith : func6 == 0b110'000, func3 == 0b110, opcode == 0b101'0111;
+ vwadd_vv : VArith : func6 == 0b110'001, func3 == 0b010, opcode == 0b101'0111;
+ vwadd_vx : VArith : func6 == 0b110'001, func3 == 0b110, opcode == 0b101'0111;
+ vwsubu_vv : VArith : func6 == 0b110'010, func3 == 0b010, opcode == 0b101'0111;
+ vwsubu_vx : VArith : func6 == 0b110'010, func3 == 0b110, opcode == 0b101'0111;
+ vwsub_vv : VArith : func6 == 0b110'011, func3 == 0b010, opcode == 0b101'0111;
+ vwsub_vx : VArith : func6 == 0b110'011, func3 == 0b110, opcode == 0b101'0111;
+ vwaddu_w_vv : VArith : func6 == 0b110'100, func3 == 0b010, opcode == 0b101'0111;
+ vwaddu_w_vx : VArith : func6 == 0b110'100, func3 == 0b110, opcode == 0b101'0111;
+ vwadd_w_vv : VArith : func6 == 0b110'101, func3 == 0b010, opcode == 0b101'0111;
+ vwadd_w_vx : VArith : func6 == 0b110'101, func3 == 0b110, opcode == 0b101'0111;
+ vwsubu_w_vv : VArith : func6 == 0b110'110, func3 == 0b010, opcode == 0b101'0111;
+ vwsubu_w_vx : VArith : func6 == 0b110'110, func3 == 0b110, opcode == 0b101'0111;
+ vwsub_w_vv : VArith : func6 == 0b110'111, func3 == 0b010, opcode == 0b101'0111;
+ vwsub_w_vx : VArith : func6 == 0b110'111, func3 == 0b110, opcode == 0b101'0111;
+ vwmulu_vv : VArith : func6 == 0b111'000, func3 == 0b010, opcode == 0b101'0111;
+ vwmulu_vx : VArith : func6 == 0b111'000, func3 == 0b110, opcode == 0b101'0111;
+ vwmulsu_vv : VArith : func6 == 0b111'010, func3 == 0b010, opcode == 0b101'0111;
+ vwmulsu_vx : VArith : func6 == 0b111'010, func3 == 0b110, opcode == 0b101'0111;
+ vwmul_vv : VArith : func6 == 0b111'011, func3 == 0b010, opcode == 0b101'0111;
+ vwmul_vx : VArith : func6 == 0b111'011, func3 == 0b110, opcode == 0b101'0111;
+ vwmaccu_vv : VArith : func6 == 0b111'100, func3 == 0b010, opcode == 0b101'0111;
+ vwmaccu_vx : VArith : func6 == 0b111'100, func3 == 0b110, opcode == 0b101'0111;
+ vwmacc_vv : VArith : func6 == 0b111'101, func3 == 0b010, opcode == 0b101'0111;
+ vwmacc_vx : VArith : func6 == 0b111'101, func3 == 0b110, opcode == 0b101'0111;
+ vwmaccus_vv : VArith : func6 == 0b111'110, func3 == 0b010, opcode == 0b101'0111;
+ vwmaccus_vx : VArith : func6 == 0b111'110, func3 == 0b110, opcode == 0b101'0111;
+ vwmaccsu_vv : VArith : func6 == 0b111'111, func3 == 0b010, opcode == 0b101'0111;
+ vwmaccsu_vx : VArith : func6 == 0b111'111, func3 == 0b110, opcode == 0b101'0111;
+
+ // VWXUNARY0 vv: VArith : func6 == 0b010'000, func3 == 0b010, opcode == 0b101'0111;
+ vmv_x_s : VArith : func6 == 0b010'000, vs1 == 0b00000, func3 == 0b010, opcode == 0b101'0111;
+ vcpop : VArith : func6 == 0b010'000, vs1 == 0b10000, func3 == 0b010, opcode == 0b101'0111;
+ vfirst : VArith : func6 == 0b010'000, vs1 == 0b10001, func3 == 0b010, opcode == 0b101'0111;
+
+ // VRXUNARY0 vx: VArith : func6 == 0b010'000, func3 == 0b110, opcode == 0b101'0111;
+ vmv_s_x : VArith : func6 == 0b010'000, vs2 == 0, func3 == 0b110, opcode == 0b101'0111;
+
+ // VXUNARY0 vv : VArith : func6 == 0b010'010, func3 == 0b010, opcode == 0b101'0111;
+ vzext_vf8: VArith : func6 == 0b010'010, vs1 == 0b00010, func3 == 0b010, opcode == 0b101'0111;
+ vsext_vf8: VArith : func6 == 0b010'010, vs1 == 0b00011, func3 == 0b010, opcode == 0b101'0111;
+ vzext_vf4: VArith : func6 == 0b010'010, vs1 == 0b00100, func3 == 0b010, opcode == 0b101'0111;
+ vsext_vf4: VArith : func6 == 0b010'010, vs1 == 0b00101, func3 == 0b010, opcode == 0b101'0111;
+ vzext_vf2: VArith : func6 == 0b010'010, vs1 == 0b00110, func3 == 0b010, opcode == 0b101'0111;
+ vsext_vf2: VArith : func6 == 0b010'010, vs1 == 0b00111, func3 == 0b010, opcode == 0b101'0111;
+
+ // VMUNARY vv : VArith : func6 == 0b010'100, func3 == 0b010, opcode == 0b101'0111;
+ vmsbf : VArith : func6 == 0b010'100, vs1 == 0b00001, func3 == 0b010, opcode == 0b101'0111;
+ vmsof : VArith : func6 == 0b010'100, vs1 == 0b00010, func3 == 0b010, opcode == 0b101'0111;
+ vmsif : VArith : func6 == 0b010'100, vs1 == 0b00011, func3 == 0b010, opcode == 0b101'0111;
+ viota : VArith : func6 == 0b010'100, vs1 == 0b10000, func3 == 0b010, opcode == 0b101'0111;
+ vid : VArith : func6 == 0b010'100, vs1 == 0b10001, func3 == 0b010, opcode == 0b101'0111;
+};
+
+instruction group RiscVZbbInst32[32] : Inst32Format {
+ andn: RType : func7 == 0b010'0000, func3 == 0b111, opcode == 0b011'0011;
+ orn: RType : func7 == 0b010'0000, func3 == 0b110, opcode == 0b011'0011;
+ xnor: RType : func7 == 0b010'0000, func3 == 0b100, opcode == 0b011'0011;
+ clz: RType : func7 == 0b011'0000, rs2 == 0b0'0000, func3 == 0b001, opcode == 0b001'0011;
+ ctz: RType : func7 == 0b011'0000, rs2 == 0b0'0001, func3 == 0b001, opcode == 0b001'0011;
+ cpop: RType : func7 == 0b011'0000, rs2 == 0b0'0010, func3 == 0b001, opcode == 0b001'0011;
+ max: RType : func7 == 0b000'0101, func3 == 0b110, opcode == 0b011'0011;
+ maxu: RType : func7 == 0b000'0101, func3 == 0b111, opcode == 0b011'0011;
+ min: RType : func7 == 0b000'0101, func3 == 0b100, opcode == 0b011'0011;
+ minu: RType : func7 == 0b000'0101, func3 == 0b101, opcode == 0b011'0011;
+ sext_b: RType : func7 == 0b011'0000, rs2 == 0b0'0100, func3 == 0b001, opcode == 0b001'0011;
+ sext_h: RType : func7 == 0b011'0000, rs2 == 0b0'0101, func3 == 0b001, opcode == 0b001'0011;
+ rol: RType : func7 == 0b011'0000, func3 == 0b001, opcode == 0b011'0011;
+ ror: RType : func7 == 0b011'0000, func3 == 0b101, opcode == 0b011'0011;
+ orcb: RType : func7 == 0b001'0100, rs2 == 0b0'0111, func3 == 0b101, opcode == 0b001'0011;
+ rev8: RType : func7 == 0b011'0100, rs2 == 0b1'1000, func3 == 0b101, opcode == 0b001'0011;
+};
+
+instruction group RiscVZbbInst32Only[32] : Inst32Format {
+ zext_h: RType : func7 == 0b000'0100, rs2 == 0b0'0000, func3 == 0b100, opcode == 0b011'0011;
+};
+
+instruction group RiscVZbbImmInst32[32] : Inst32Format {
+ rori: RType : func7 == 0b011'0000, func3 == 0b101, opcode == 0b001'0011;
+};
diff --git a/sim/kelvin_v2.isa b/sim/kelvin_v2.isa
new file mode 100644
index 0000000..9943ea6
--- /dev/null
+++ b/sim/kelvin_v2.isa
@@ -0,0 +1,1804 @@
+// Copyright 2025 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This file defines the Kelvin V2 ISA for mpact-sim. For more info on the
+// mpact-sim ISA format, see: go/mpact-sim-codelabs-riscv-instruction-decoder
+
+// First disasm field is 18 char wide and left justified.
+disasm widths = {-18};
+
+int global_latency = 0;
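+// Note: global_latency is used as the `default latency` in each slot below,
+// so all instructions currently retire with no added latency; raising it here
+// would presumably apply to every slot at once.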
+
+isa KelvinV2 {
+ namespace kelvin::sim::isa32_v2;
+ slots { kelvin_v2; }
+}
+
+// TODO: b/448154052 - Kelvin V2 sim should reuse mpact-riscv isa and bin_fmt
+// files.
+
+// Basic integer ALU instructions, part of the RiscV 32i subset.
+slot riscv32i {
+ includes {
+ #include "external/com_google_mpact-riscv/riscv/riscv_i_instructions.h"
+ }
+ default size = 4;
+ default latency = global_latency;
+ resources TwoOp = { next_pc, rs1 : rd[..rd]};
+ resources ThreeOp = { next_pc, rs1, rs2 : rd[..rd]};
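+  // Note: opcode operand lists below follow the mpact-sim
+  // `{ predicate : sources : destinations }` convention; the resource sets
+  // above appear to pair the resources read (left of ':') with the
+  // destination resources reserved (right of ':').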
+ opcodes {
+ addi{: rs1, I_imm12 : rd},
+ resources: TwoOp,
+ disasm: "addi", "%rd, %rs1, %I_imm12",
+ semfunc: "&::mpact::sim::riscv::RV32::RiscVIAdd";
+ nop{},
+ resources: { next_pc },
+ disasm: "nop",
+ semfunc: "&::mpact::sim::riscv::RiscVINop";
+ slti{: rs1, I_imm12 : rd},
+ resources: TwoOp,
+ disasm: "slti", "%rd, %rs1, %I_imm12",
+ semfunc: "&::mpact::sim::riscv::RV32::RiscVISlt";
+ sltiu{: rs1, I_imm12 : rd},
+ resources: TwoOp,
+ disasm: "sltiu", "%rd, %rs1, %I_imm12",
+ semfunc: "&::mpact::sim::riscv::RV32::RiscVISltu";
+ andi{: rs1, I_imm12 : rd},
+ resources: TwoOp,
+ disasm: "andi", "%rd, %rs1, %I_imm12",
+ semfunc: "&::mpact::sim::riscv::RV32::RiscVIAnd";
+ ori{: rs1, I_imm12 : rd},
+ resources: TwoOp,
+ disasm: "ori", "%rd, %rs1, %I_imm12",
+ semfunc: "&::mpact::sim::riscv::RV32::RiscVIOr";
+ xori{: rs1, I_imm12 : rd},
+ resources: TwoOp,
+ disasm: "xori", "%rd, %rs1, %I_imm12",
+ semfunc: "&::mpact::sim::riscv::RV32::RiscVIXor";
+ slli{: rs1, I_uimm5 : rd},
+ resources: TwoOp,
+ disasm: "slli", "%rd, %rs1, 0x%(I_uimm5:x)",
+ semfunc: "&::mpact::sim::riscv::RV32::RiscVISll";
+ srli{: rs1, I_uimm5 : rd},
+ resources: TwoOp,
+      disasm: "srli", "%rd, %rs1, 0x%(I_uimm5:x)",
+ semfunc: "&::mpact::sim::riscv::RV32::RiscVISrl";
+ srai{: rs1, I_uimm5 : rd},
+ resources: TwoOp,
+ disasm: "srai", "%rd, %rs1, 0x%(I_uimm5:x)",
+ semfunc: "&::mpact::sim::riscv::RV32::RiscVISra";
+ lui{: U_imm20 : rd},
+ resources: { next_pc : rd[0..]},
+ disasm: "lui", "%rd, 0x%(U_imm20:08x)",
+ semfunc: "&::mpact::sim::riscv::RV32::RiscVILui";
+ auipc{: U_imm20 : rd},
+ resources: { next_pc : rd[0..]},
+ disasm: "auipc", "%rd, 0x%(U_imm20:08x)",
+ semfunc: "&::mpact::sim::riscv::RV32::RiscVIAuipc";
+ add{: rs1, rs2 : rd},
+ resources: ThreeOp,
+ disasm: "add", "%rd, %rs1, %rs2",
+ semfunc: "&::mpact::sim::riscv::RV32::RiscVIAdd";
+ slt{: rs1, rs2 : rd},
+ resources: ThreeOp,
+ disasm: "slt", "%rd, %rs1, %rs2",
+ semfunc: "&::mpact::sim::riscv::RV32::RiscVISlt";
+ sltu{: rs1, rs2 : rd},
+ resources: ThreeOp,
+ disasm: "sltu", "%rd, %rs1, %rs2",
+ semfunc: "&::mpact::sim::riscv::RV32::RiscVISltu";
+ and{: rs1, rs2 : rd},
+ resources: ThreeOp,
+ disasm: "and", "%rd, %rs1, %rs2",
+ semfunc: "&::mpact::sim::riscv::RV32::RiscVIAnd";
+ or{: rs1, rs2 : rd},
+ resources: ThreeOp,
+ disasm: "or", "%rd, %rs1, %rs2",
+ semfunc: "&::mpact::sim::riscv::RV32::RiscVIOr";
+ xor{: rs1, rs2 : rd},
+ resources: ThreeOp,
+ disasm: "xor", "%rd, %rs1, %rs2",
+ semfunc: "&::mpact::sim::riscv::RV32::RiscVIXor";
+ sll{: rs1, rs2 : rd},
+ resources: ThreeOp,
+ disasm: "sll", "%rd, %rs1, %rs2",
+ semfunc: "&::mpact::sim::riscv::RV32::RiscVISll";
+ srl{: rs1, rs2 : rd},
+ resources: ThreeOp,
+ disasm: "srl", "%rd, %rs1, %rs2",
+ semfunc: "&::mpact::sim::riscv::RV32::RiscVISrl";
+ sub{: rs1, rs2 : rd},
+ resources: ThreeOp,
+ disasm: "sub", "%rd, %rs1, %rs2",
+ semfunc: "&::mpact::sim::riscv::RV32::RiscVISub";
+ sra{: rs1, rs2 : rd},
+ resources: ThreeOp,
+ disasm: "sra", "%rd, %rs1, %rs2",
+ semfunc: "&::mpact::sim::riscv::RV32::RiscVISra";
+ hint{},
+ disasm: "hint",
+ semfunc: "&::mpact::sim::riscv::RiscVINop";
+ jal{: J_imm20 : next_pc, rd},
+ resources: { next_pc : next_pc[0..], rd[0..]},
+ disasm: "jal", "%rd, 0x%(@+J_imm20:08x)",
+ semfunc: "&::mpact::sim::riscv::RV32::RiscVIJal";
+ jalr{: rs1, J_imm12 : next_pc, rd},
+ resources: { next_pc, rs1 : next_pc[0..], rd[0..]},
+ disasm: "jalr", "%rd, %rs1, %J_imm12",
+ semfunc: "&::mpact::sim::riscv::RV32::RiscVIJalr";
+ j{: J_imm20 : next_pc, rd},
+ resources: { next_pc : next_pc[0..], rd[0..]},
+ disasm: "j", "0x%(@+J_imm20:08x)",
+ semfunc: "&::mpact::sim::riscv::RV32::RiscVIJal";
+ jr{: rs1, J_imm12 : next_pc, rd},
+ resources: { next_pc, rs1 : next_pc[0..], rd[0..]},
+ disasm: "jr", "%rs1, %J_imm12",
+ semfunc: "&::mpact::sim::riscv::RV32::RiscVIJalr";
+ beq{: rs1, rs2, B_imm12 : next_pc},
+ resources: { next_pc, rs1, rs2 : next_pc[0..]},
+ disasm: "beq", "%rs1, %rs2, 0x%(@+B_imm12:08x)",
+ semfunc: "&::mpact::sim::riscv::RV32::RiscVIBeq";
+ bne{: rs1, rs2, B_imm12 : next_pc},
+ resources: { next_pc, rs1, rs2 : next_pc[0..]},
+ disasm: "bne", "%rs1, %rs2, 0x%(@+B_imm12:08x)",
+ semfunc: "&::mpact::sim::riscv::RV32::RiscVIBne";
+ blt{: rs1, rs2, B_imm12 : next_pc},
+ resources: { next_pc, rs1, rs2 : next_pc[0..]},
+ disasm: "blt", "%rs1, %rs2, 0x%(@+B_imm12:08x)",
+ semfunc: "&::mpact::sim::riscv::RV32::RiscVIBlt";
+ bltu{: rs1, rs2, B_imm12 : next_pc},
+ resources: { next_pc, rs1, rs2 : next_pc[0..]},
+ disasm: "bltu", "%rs1, %rs2, 0x%(@+B_imm12:08x)",
+ semfunc: "&::mpact::sim::riscv::RV32::RiscVIBltu";
+ bge{: rs1, rs2, B_imm12 : next_pc},
+ resources: { next_pc, rs1, rs2 : next_pc[0..]},
+ disasm: "bge", "%rs1, %rs2, 0x%(@+B_imm12:08x)",
+ semfunc: "&::mpact::sim::riscv::RV32::RiscVIBge";
+ bgeu{: rs1, rs2, B_imm12 : next_pc},
+ resources: { next_pc, rs1, rs2 : next_pc[0..]},
+ disasm: "bgeu", "%rs1, %rs2, 0x%(@+B_imm12:08x)",
+ semfunc: "&::mpact::sim::riscv::RV32::RiscVIBgeu";
+    lw{(: rs1, I_imm12 :), (: : rd)},
+ resources: { next_pc, rs1 : rd[0..]},
+ disasm: "lw", "%rd, %I_imm12(%rs1)",
+ semfunc: "&::mpact::sim::riscv::RV32::RiscVILw",
+ "&::mpact::sim::riscv::RV32::RiscVILwChild";
+ lh{(: rs1, I_imm12 :), (: : rd)},
+ resources: { next_pc, rs1 : rd[0..]},
+ disasm: "lh", "%rd, %I_imm12(%rs1)",
+ semfunc: "&::mpact::sim::riscv::RV32::RiscVILh",
+ "&::mpact::sim::riscv::RV32::RiscVILhChild";
+ lhu{(: rs1, I_imm12 :), (: : rd)},
+ resources: { next_pc, rs1 : rd[0..]},
+ disasm: "lhu", "%rd, %I_imm12(%rs1)",
+ semfunc: "&::mpact::sim::riscv::RV32::RiscVILhu",
+ "&::mpact::sim::riscv::RV32::RiscVILhuChild";
+ lb{(: rs1, I_imm12 :), (: : rd)},
+ resources: { next_pc, rs1 : rd[0..]},
+ disasm: "lb", "%rd, %I_imm12(%rs1)",
+ semfunc: "&::mpact::sim::riscv::RV32::RiscVILb",
+ "&::mpact::sim::riscv::RV32::RiscVILbChild";
+ lbu{(: rs1, I_imm12 :), (: : rd)},
+ resources: { next_pc, rs1 : rd[0..]},
+ disasm: "lbu", "%rd, %I_imm12(%rs1)",
+ semfunc: "&::mpact::sim::riscv::RV32::RiscVILbu",
+ "&::mpact::sim::riscv::RV32::RiscVILbuChild";
+ sw{: rs1, S_imm12, rs2 : },
+ resources: { next_pc, rs1, rs2 : },
+ disasm: "sw", "%rs2, %S_imm12(%rs1)",
+ semfunc: "&::mpact::sim::riscv::RV32::RiscVISw";
+ sh{: rs1, S_imm12, rs2 : },
+ resources: { next_pc, rs1, rs2 : },
+ disasm: "sh", "%rs2, %S_imm12(%rs1)",
+ semfunc: "&::mpact::sim::riscv::RV32::RiscVISh";
+ sb{: rs1, S_imm12, rs2 : },
+ resources: { next_pc, rs1, rs2 : },
+ disasm: "sb", "%rs2, %S_imm12(%rs1)",
+ semfunc: "&::mpact::sim::riscv::RV32::RiscVISb";
+ fence{: pred, succ : },
+ disasm: "fence",
+ semfunc: "&::mpact::sim::riscv::RiscVIFence";
+ fence_tso{},
+ disasm: "fence.tso",
+ semfunc: "&::mpact::sim::riscv::RiscVIFenceTso";
+ ecall{},
+ disasm: "ecall",
+ semfunc: "&::mpact::sim::riscv::RiscVIEcall";
+ ebreak{},
+ disasm: "ebreak",
+ semfunc: "&::mpact::sim::riscv::RiscVIEbreak";
+ }
+}
+
+// This slot contains the hint instructions. These execute as nops, but can be
+// changed to provide performance hints to a simulated microarchitecture.
+slot riscv32_hints {
+ includes {
+ #include "external/com_google_mpact-riscv/riscv/riscv_i_instructions.h"
+ }
+ default size = 4;
+ default latency = global_latency;
+ resources TwoOp = { next_pc, rs1 : rd[..rd]};
+ resources ThreeOp = { next_pc, rs1, rs2 : rd[..rd]};
+ opcodes {
+ addi_hint1{: rs1, I_imm12 : rd},
+ resources: TwoOp,
+ disasm: "addi", "%rd, %rs1, %I_imm12",
+ semfunc: "&::mpact::sim::riscv::RiscVINop";
+ addi_hint2{: rs1, I_imm12 : rd},
+ resources: TwoOp,
+ disasm: "addi", "%rd, %rs1, %I_imm12",
+ semfunc: "&::mpact::sim::riscv::RiscVINop";
+ slti_hint{: rs1, I_imm12 : rd},
+ resources: TwoOp,
+ disasm: "slti", "%rd, %rs1, %I_imm12",
+ semfunc: "&::mpact::sim::riscv::RiscVINop";
+ sltiu_hint{: rs1, I_imm12 : rd},
+ resources: TwoOp,
+ disasm: "sltiu", "%rd, %rs1, %I_imm12",
+ semfunc: "&::mpact::sim::riscv::RiscVINop";
+ andi_hint{: rs1, I_imm12 : rd},
+ resources: TwoOp,
+ disasm: "andi", "%rd, %rs1, %I_imm12",
+ semfunc: "&::mpact::sim::riscv::RiscVINop";
+ ori_hint{: rs1, I_imm12 : rd},
+ resources: TwoOp,
+ disasm: "ori", "%rd, %rs1, %I_imm12",
+ semfunc: "&::mpact::sim::riscv::RiscVINop";
+ xori_hint{: rs1, I_imm12 : rd},
+ resources: TwoOp,
+ disasm: "xori", "%rd, %rs1, %I_imm12",
+ semfunc: "&::mpact::sim::riscv::RiscVINop";
+ slli_semihost{: rs1, I_uimm5 : rd},
+ resources: TwoOp,
+ disasm: "slli", "%rd, %rs1, 0x%(I_uimm5:x)",
+ semfunc: "&::mpact::sim::riscv::RiscVINop";
+ slli_hint1{: rs1, I_uimm5 : rd},
+ resources: TwoOp,
+ disasm: "slli", "%rd, %rs1, 0x%(I_uimm5:x)",
+ semfunc: "&::mpact::sim::riscv::RiscVINop";
+ slli_hint2{: rs1, I_uimm5 : rd},
+ resources: TwoOp,
+ disasm: "slli", "%rd, %rs1, 0x%(I_uimm5:x)",
+ semfunc: "&::mpact::sim::riscv::RiscVINop";
+ srli_hint{: rs1, I_uimm5 : rd},
+ resources: TwoOp,
+      disasm: "srli", "%rd, %rs1, 0x%(I_uimm5:x)",
+ semfunc: "&::mpact::sim::riscv::RiscVINop";
+ srai_semihost{: rs1, I_uimm5 : rd},
+ resources: TwoOp,
+ disasm: "srai", "%rd, %rs1, 0x%(I_uimm5:x)",
+ semfunc: "&::mpact::sim::riscv::RiscVINop";
+ srai_hint1{: rs1, I_uimm5 : rd},
+ resources: TwoOp,
+ disasm: "srai", "%rd, %rs1, 0x%(I_uimm5:x)",
+ semfunc: "&::mpact::sim::riscv::RiscVINop";
+ srai_hint2{: rs1, I_uimm5 : rd},
+ resources: TwoOp,
+ disasm: "srai", "%rd, %rs1, 0x%(I_uimm5:x)",
+ semfunc: "&::mpact::sim::riscv::RiscVINop";
+ lui_hint{: U_imm20 : rd},
+ resources: { next_pc : rd[0..]},
+ disasm: "lui", "%rd, 0x%(U_imm20:08x)",
+ semfunc: "&::mpact::sim::riscv::RiscVINop";
+ auipc_hint{: U_imm20 : rd},
+ resources: { next_pc : rd[0..]},
+ disasm: "auipc", "%rd, 0x%(U_imm20:08x)",
+ semfunc: "&::mpact::sim::riscv::RiscVINop";
+ add_hint1{: rs1, rs2 : rd},
+ resources: ThreeOp,
+ disasm: "add", "%rd, %rs1, %rs2",
+ semfunc: "&::mpact::sim::riscv::RiscVINop";
+ add_hint2{: rs1, rs2 : rd},
+ resources: ThreeOp,
+ disasm: "add", "%rd, %rs1, %rs2",
+ semfunc: "&::mpact::sim::riscv::RiscVINop";
+ add_hint3{: rs1, rs2 : rd},
+ resources: ThreeOp,
+ disasm: "add", "%rd, %rs1, %rs2",
+ semfunc: "&::mpact::sim::riscv::RiscVINop";
+ and_hint{: rs1, rs2 : rd},
+ resources: ThreeOp,
+ disasm: "and", "%rd, %rs1, %rs2",
+ semfunc: "&::mpact::sim::riscv::RiscVINop";
+ or_hint{: rs1, rs2 : rd},
+ resources: ThreeOp,
+ disasm: "or", "%rd, %rs1, %rs2",
+ semfunc: "&::mpact::sim::riscv::RiscVINop";
+ xor_hint{: rs1, rs2 : rd},
+ resources: ThreeOp,
+ disasm: "xor", "%rd, %rs1, %rs2",
+ semfunc: "&::mpact::sim::riscv::RiscVINop";
+ sll_hint{: rs1, rs2 : rd},
+ resources: ThreeOp,
+ disasm: "sll", "%rd, %rs1, %rs2",
+ semfunc: "&::mpact::sim::riscv::RiscVINop";
+ srl_hint{: rs1, rs2 : rd},
+ resources: ThreeOp,
+ disasm: "srl", "%rd, %rs1, %rs2",
+ semfunc: "&::mpact::sim::riscv::RiscVINop";
+ sub_hint{: rs1, rs2 : rd},
+ resources: ThreeOp,
+ disasm: "sub", "%rd, %rs1, %rs2",
+ semfunc: "&::mpact::sim::riscv::RiscVINop";
+ sra_hint{: rs1, rs2 : rd},
+ resources: ThreeOp,
+ disasm: "sra", "%rd, %rs1, %rs2",
+ semfunc: "&::mpact::sim::riscv::RiscVINop";
+ slt_hint{: rs1, rs2 : rd},
+ resources: ThreeOp,
+ disasm: "slt", "%rd, %rs1, %rs2",
+ semfunc: "&::mpact::sim::riscv::RiscVINop";
+ sltu_hint{: rs1, rs2 : rd},
+ resources: ThreeOp,
+ disasm: "sltu", "%rd, %rs1, %rs2",
+ semfunc: "&::mpact::sim::riscv::RiscVINop";
+ fence_hint1{: pred, succ :},
+ disasm: "fence",
+ semfunc: "&::mpact::sim::riscv::RiscVINop";
+ fence_hint2{: pred, succ :},
+ disasm: "fence",
+ semfunc: "&::mpact::sim::riscv::RiscVINop";
+ fence_hint3{: pred, succ :},
+ disasm: "fence",
+ semfunc: "&::mpact::sim::riscv::RiscVINop";
+ fence_hint4{: pred, succ :},
+ disasm: "fence",
+ semfunc: "&::mpact::sim::riscv::RiscVINop";
+ fence_hint5{: pred, succ :},
+ disasm: "fence",
+ semfunc: "&::mpact::sim::riscv::RiscVINop";
+ fence_hint6{: pred, succ :},
+ disasm: "fence",
+ semfunc: "&::mpact::sim::riscv::RiscVINop";
+ fence_hint7{: pred, succ :},
+ disasm: "fence",
+ semfunc: "&::mpact::sim::riscv::RiscVINop";
+ }
+}
+
+
+// Instruction fence.
+slot zfencei {
+ includes {
+ #include "external/com_google_mpact-riscv/riscv/riscv_zfencei_instructions.h"
+ }
+ default size = 4;
+ default latency = global_latency;
+ opcodes {
+ fencei{: I_imm12 : },
+ disasm: "fence.i",
+ semfunc: "&::mpact::sim::riscv::RiscVZFencei";
+ }
+}
+
+// RiscV32 multiply/divide instructions.
+slot riscv32m {
+ includes {
+ #include "external/com_google_mpact-riscv/riscv/riscv_m_instructions.h"
+ }
+ default size = 4;
+ default latency = global_latency;
+ resources ThreeOp = { next_pc, rs1, rs2 : rd[..rd]};
+ opcodes {
+ mul{: rs1, rs2 : rd},
+ resources: ThreeOp,
+ disasm: "mul", "%rd, %rs1, %rs2",
+ semfunc: "&::mpact::sim::riscv::RV32::MMul";
+ mulh{: rs1, rs2 : rd},
+ resources: ThreeOp,
+ disasm: "mulh", "%rd, %rs1, %rs2",
+ semfunc: "&::mpact::sim::riscv::RV32::MMulh";
+ mulhu{: rs1, rs2: rd},
+ resources: ThreeOp,
+ disasm: "mulhu", "%rd, %rs1, %rs2",
+ semfunc: "&::mpact::sim::riscv::RV32::MMulhu";
+ mulhsu{: rs1, rs2: rd},
+ resources: ThreeOp,
+ disasm: "mulhsu", "%rd, %rs1, %rs2",
+ semfunc: "&::mpact::sim::riscv::RV32::MMulhsu";
+ div{: rs1, rs2 : rd},
+ resources: ThreeOp,
+ disasm: "div", "%rd, %rs1, %rs2",
+ semfunc: "&::mpact::sim::riscv::RV32::MDiv";
+ divu{: rs1, rs2 : rd},
+ resources: ThreeOp,
+ disasm: "divu", "%rd, %rs1, %rs2",
+ semfunc: "&::mpact::sim::riscv::RV32::MDivu";
+ rem{: rs1, rs2 : rd},
+ resources: ThreeOp,
+ disasm: "rem", "%rd, %rs1, %rs2",
+ semfunc: "&::mpact::sim::riscv::RV32::MRem";
+ remu{: rs1, rs2 : rd},
+ resources: ThreeOp,
+ disasm: "remu", "%rd, %rs1, %rs2",
+ semfunc: "&::mpact::sim::riscv::RV32::MRemu";
+ }
+}
+
+
+// RiscV32 CSR manipulation instructions.
+slot zicsr {
+ includes {
+ #include "external/com_google_mpact-riscv/riscv/riscv_zicsr_instructions.h"
+ }
+ default size = 4;
+ default latency = global_latency;
+ opcodes {
+ csrrw{: rs1, csr : rd, csr},
+ resources: { next_pc, rs1, csr : rd[0..], csr[0..]},
+ semfunc: "&::mpact::sim::riscv::RV32::RiscVZiCsrrw",
+ disasm: "csrw", "%rd, %csr, %rs1";
+ csrrs{: rs1, csr : rd, csr},
+ resources: { next_pc, rs1, csr : rd[0..], csr[0..]},
+ semfunc: "&::mpact::sim::riscv::RV32::RiscVZiCsrrs",
+ disasm: "csrs", "%rd, %csr, %rs1";
+ csrrc{: rs1, csr : rd, csr},
+ resources: { next_pc, rs1, csr : rd[0..], csr[0..]},
+ semfunc: "&::mpact::sim::riscv::RV32::RiscVZiCsrrc",
+ disasm: "csrc", "%rd, %csr, %rs1";
+ csrrs_nr{: rs1, csr : rd, csr},
+ resources: { next_pc, rs1, csr : rd[0..], csr[0..]},
+ semfunc: "&::mpact::sim::riscv::RV32::RiscVZiCsrrs",
+ disasm: "csrs", "%csr, %rs1";
+ csrrc_nr{: rs1, csr : rd, csr},
+ resources: { next_pc, rs1, csr : rd[0..], csr[0..]},
+ semfunc: "&::mpact::sim::riscv::RV32::RiscVZiCsrrc",
+ disasm: "csrc", "%csr, %rs1";
+ csrrw_nr{: rs1, csr : csr},
+ resources: { next_pc, rs1: csr[0..]},
+ semfunc: "&::mpact::sim::riscv::RV32::RiscVZiCsrrwNr", // rd == 0 (x0).
+ disasm: "csrw", "%csr, %rs1";
+ csrrs_nw{: csr : rd},
+ resources: { next_pc, csr: rd[0..]},
+ semfunc: "&::mpact::sim::riscv::RV32::RiscVZiCsrrNw", // rs1 == 0 (x0).
+ disasm: "csrs", "%rd, %csr";
+ csrrc_nw{: csr : rd},
+ resources: { next_pc, csr: rd[0..]},
+ semfunc: "&::mpact::sim::riscv::RV32::RiscVZiCsrrNw", // rs1 == 0 (x0).
+ disasm: "csrc", "%rd, %csr";
+ csrrwi{: CSR_uimm5, csr : rd, csr},
+ resources: { next_pc, csr: rd[0..], csr[0..]},
+ semfunc: "&::mpact::sim::riscv::RV32::RiscVZiCsrrw",
+ disasm: "csrwi", "%rd, %csr, %CSR_uimm5";
+ csrrsi{: CSR_uimm5, csr : rd, csr},
+ resources: { next_pc, csr: rd[0..], csr[0..]},
+ semfunc: "&::mpact::sim::riscv::RV32::RiscVZiCsrrs",
+ disasm: "csrsi", "%rd, %csr, %CSR_uimm5";
+ csrrci{: CSR_uimm5, csr : rd, csr},
+ resources: { next_pc, csr: rd[0..], csr[0..]},
+ semfunc: "&::mpact::sim::riscv::RV32::RiscVZiCsrrc",
+ disasm: "csrci", "%rd, %csr, %CSR_uimm5";
+ csrrsi_nr{: CSR_uimm5, csr : rd, csr},
+ resources: { next_pc, csr: rd[0..], csr[0..]},
+ semfunc: "&::mpact::sim::riscv::RV32::RiscVZiCsrrs",
+ disasm: "csrsi", "%csr, %CSR_uimm5";
+ csrrci_nr{: CSR_uimm5, csr : rd, csr},
+ resources: { next_pc, csr: rd[0..], csr[0..]},
+ semfunc: "&::mpact::sim::riscv::RV32::RiscVZiCsrrc",
+ disasm: "csrci", "%csr, %CSR_uimm5";
+ csrrwi_nr{: CSR_uimm5, csr : csr},
+ resources: { next_pc : csr[0..]},
+ semfunc: "&::mpact::sim::riscv::RV32::RiscVZiCsrrwNr", // rd == 0 (x0).
+      disasm: "csrwi", "%csr, %CSR_uimm5";
+ csrrsi_nw{: csr : rd},
+ resources: { next_pc, csr : rd[0..]},
+ semfunc: "&::mpact::sim::riscv::RV32::RiscVZiCsrrNw", // uimm5 == 0.
+ disasm: "csrsi", "%rd, %csr, 0";
+ csrrci_nw{: csr : rd},
+ resources: { next_pc, csr : rd[0..]},
+ semfunc: "&::mpact::sim::riscv::RV32::RiscVZiCsrrNw", // uimm5 == 0.
+      disasm: "csrci", "%rd, %csr, 0";
+ unimp{},
+ disasm: "unimp",
+ semfunc: "&::mpact::sim::riscv::RiscVIUnimplemented";
+ }
+}
+
+// RiscV32 F (single precision floating point) instructions.
+slot riscv32f {
+ includes {
+ #include "external/com_google_mpact-riscv/riscv/riscv_f_instructions.h"
+ }
+ default size = 4;
+ default latency = global_latency;
+ resources TwoOp = { next_pc, frs1 : frd[0..]};
+ resources ThreeOp = { next_pc, frs1, frs2 : frd[0..]};
+ resources FourOp = { next_pc, frs1, frs2, frs3 : frd[0..]};
+ opcodes {
+ flw{(: rs1, I_imm12 : ), (: : frd)},
+ resources: { next_pc, rs1 : frd[0..]},
+ semfunc: "&::mpact::sim::riscv::RV32::RiscVILw",
+ "&::mpact::sim::riscv::RiscVIFlwChild",
+ disasm: "flw", "%frd, %I_imm12(%rs1)";
+ fsw{: rs1, S_imm12, frs2},
+ resources: { next_pc, rs1, frs2},
+ semfunc: "&::mpact::sim::riscv::RV32::RiscVFSw",
+ disasm: "fsw", "%frs2, %S_imm12(%rs1)";
+ fadd_s{: frs1, frs2, rm : frd},
+ resources: ThreeOp,
+ semfunc: "&::mpact::sim::riscv::RiscVFAdd",
+ disasm: "fadd", "%frd, %frs1, %frs2";
+ fsub_s{: frs1, frs2, rm : frd},
+ resources: ThreeOp,
+ semfunc: "&::mpact::sim::riscv::RiscVFSub",
+ disasm: "fsub", "%frd, %frs1, %frs2";
+ fmul_s{: frs1, frs2, rm : frd},
+ resources: ThreeOp,
+ semfunc: "&::mpact::sim::riscv::RiscVFMul",
+ disasm: "fmul", "%frd, %frs1, %frs2";
+ fdiv_s{: frs1, frs2, rm : frd},
+ resources: ThreeOp,
+ semfunc: "&::mpact::sim::riscv::RiscVFDiv",
+ disasm: "fdiv", "%frd, %frs1, %frs2";
+ fsqrt_s{: frs1, rm : frd, fflags},
+ resources: TwoOp,
+ semfunc: "&::mpact::sim::riscv::RiscVFSqrt",
+ disasm: "fsqrt", "%frd, %frs1";
+ fmin_s{: frs1, frs2 : frd, fflags},
+ resources: ThreeOp,
+ semfunc: "&::mpact::sim::riscv::RiscVFMin",
+ disasm: "fmin", "%frd, %frs1, %frs2";
+ fmax_s{: frs1, frs2 : frd, fflags},
+ resources: ThreeOp,
+ semfunc: "&::mpact::sim::riscv::RiscVFMax",
+ disasm: "fmax", "%frd, %frs1, %frs2";
+ fmadd_s{: frs1, frs2, frs3, rm : frd, fflags},
+ resources: FourOp,
+ semfunc: "&::mpact::sim::riscv::RiscVFMadd",
+ disasm: "fmadd", "%frd, %frs1, %frs2, %frs3";
+ fmsub_s{: frs1, frs2, frs3, rm : frd, fflags},
+ resources: FourOp,
+ semfunc: "&::mpact::sim::riscv::RiscVFMsub",
+ disasm: "fmsub", "%frd, %frs1, %frs2, %frs3";
+ fnmadd_s{: frs1, frs2, frs3, rm : frd, fflags},
+ resources: FourOp,
+ semfunc: "&::mpact::sim::riscv::RiscVFNmadd",
+ disasm: "fnmadd", "%frd, %frs1, %frs2, %frs3";
+ fnmsub_s{: frs1, frs2, frs3, rm : frd, fflags},
+ resources: FourOp,
+ semfunc: "&::mpact::sim::riscv::RiscVFNmsub",
+ disasm: "fnmsub", "%frd, %frs1, %frs2, %frs3";
+ fcvt_ws{: frs1, rm : rd, fflags},
+ resources: TwoOp,
+ semfunc: "&::mpact::sim::riscv::RV32::RiscVFCvtWs",
+ disasm: "fcvt.w.s", "%rd, %frs1";
+ fcvt_sw{: rs1, rm : frd},
+ resources: TwoOp,
+ semfunc: "&::mpact::sim::riscv::RiscVFCvtSw",
+ disasm: "fcvt.s.w", "%frd, %rs1";
+ fcvt_wus{: frs1, rm : rd, fflags},
+ resources: TwoOp,
+ semfunc: "&::mpact::sim::riscv::RV32::RiscVFCvtWus",
+ disasm: "fcvt.wu.s", "%rd, %frs1";
+ fcvt_swu{: rs1, rm : frd},
+ resources: TwoOp,
+ semfunc: "&::mpact::sim::riscv::RiscVFCvtSwu",
+ disasm: "fcvt.s.wu", "%frd, %rs1";
+ fsgnj_s{: frs1, frs2 : frd},
+ resources: ThreeOp,
+ semfunc: "&::mpact::sim::riscv::RiscVFSgnj",
+      disasm: "fsgnj.s", "%frd, %frs1, %frs2";
+ fsgnjn_s{: frs1, frs2 : frd},
+ resources: ThreeOp,
+ semfunc: "&::mpact::sim::riscv::RiscVFSgnjn",
+      disasm: "fsgnjn.s", "%frd, %frs1, %frs2";
+ fsgnjx_s{: frs1, frs2 : frd},
+ resources: ThreeOp,
+ semfunc: "&::mpact::sim::riscv::RiscVFSgnjx",
+ disasm: "fsgnjx.s", "%frd, %frs1, %frs2";
+ fmv_xw{: frs1 : rd},
+ resources: { next_pc, frs1 : rd[0..]},
+      disasm: "fmv.x.w", "%rd, %frs1",
+ semfunc: "&::mpact::sim::riscv::RV32::RiscVFMvxw";
+ fmv_wx{: rs1 : frd},
+ resources: { next_pc, rs1 : frd[0..]},
+      disasm: "fmv.w.x", "%frd, %rs1",
+ semfunc: "&::mpact::sim::riscv::RiscVFMvwx";
+ fcmpeq_s{: frs1, frs2 : rd, fflags},
+ resources: { next_pc, frs1, frs2 : rd[0..]},
+ semfunc: "&::mpact::sim::riscv::RV32::RiscVFCmpeq",
+ disasm: "fcmpeq", "%rd, %frs1, %frs2";
+ fcmplt_s{: frs1, frs2 : rd, fflags},
+ resources: { next_pc, frs1, frs2 : rd[0..]},
+ semfunc: "&::mpact::sim::riscv::RV32::RiscVFCmplt",
+ disasm: "fcmplt", "%rd, %frs1, %frs2";
+ fcmple_s{: frs1, frs2 : rd, fflags},
+ resources: { next_pc, frs1, frs2 : rd[0..]},
+ semfunc: "&::mpact::sim::riscv::RV32::RiscVFCmple",
+ disasm: "fcmple", "%rd, %frs1, %frs2";
+ fclass_s{: frs1 : rd},
+ resources: { next_pc, frs1 : rd[0..]},
+ semfunc: "&::mpact::sim::riscv::RV32::RiscVFClass",
+ disasm: "fclass", "%rd, %frs1";
+ }
+}
+
+
+// Privileged instructions.
+slot privileged {
+ includes {
+ #include "external/com_google_mpact-riscv/riscv/riscv_priv_instructions.h"
+ }
+ default size = 4;
+ default latency = global_latency;
+ opcodes {
+ uret{: : next_pc(0)},
+ disasm: "uret",
+ semfunc: "&::mpact::sim::riscv::RV32::RiscVPrivURet";
+ sret{: : next_pc(0)},
+ disasm: "sret",
+ semfunc: "&::mpact::sim::riscv::RV32::RiscVPrivSRet";
+ mret{: : next_pc(0)},
+ disasm: "mret",
+ semfunc: "&::mpact::sim::riscv::RV32::RiscVPrivMRet";
+ wfi{},
+ disasm: "wfi",
+ semfunc: "&::mpact::sim::riscv::RiscVPrivWfi";
+    // The sfence.vma instruction has 4 behaviors depending on whether rs1
+    // and/or rs2 are x0. These behaviors are split into 4 instruction
+    // variants below.
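+    // As suggested by the resource lists, the suffix encodes which operands
+    // are x0: _zz = both rs1 and rs2, _zn = rs1 only, _nz = rs2 only,
+    // _nn = neither.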
+ sfence_vma_zz{: rs1, rs2},
+ resources: {},
+ disasm: "sfence.vma", "%rs1, %rs2",
+ semfunc: "&::mpact::sim::riscv::RiscVPrivSFenceVmaZZ";
+ sfence_vma_zn{: rs1, rs2},
+ resources: {rs2},
+ disasm: "sfence.vma", "%rs1, %rs2",
+ semfunc: "&::mpact::sim::riscv::RiscVPrivSFenceVmaZN";
+ sfence_vma_nz{: rs1, rs2},
+ resources: { rs1 },
+ disasm: "sfence.vma", "%rs1, %rs2",
+ semfunc: "&::mpact::sim::riscv::RiscVPrivSFenceVmaNZ";
+ sfence_vma_nn{: rs1, rs2},
+ resources: {rs1, rs2},
+ disasm: "sfence.vma", "%rs1, %rs2",
+ semfunc: "&::mpact::sim::riscv::RiscVPrivSFenceVmaNN";
+ // Skipping hypervisor memory management instructions for now.
+ }
+}
+
+slot riscv32_zbb {
+ default size = 4;
+ resources TwoOp = { next_pc, rs1 : rd[..rd]};
+ resources ThreeOp = { next_pc, rs1, rs2 : rd[..rd]};
+ includes {
+ #include "external/com_google_mpact-riscv/riscv/riscv_bitmanip_instructions.h"
+ }
+
+ opcodes {
+ // Logical with negate.
+ andn{: rs1, rs2 : rd},
+ resources: ThreeOp,
+ disasm: "andn", "%rd, %rs1, %rs2",
+ semfunc: "&::mpact::sim::riscv::RV32::RiscVAndn";
+ orn{: rs1, rs2 : rd},
+ resources: ThreeOp,
+ disasm: "orn", "%rd, %rs1, %rs2",
+ semfunc: "&::mpact::sim::riscv::RV32::RiscVOrn";
+ xnor{: rs1, rs2 : rd},
+ resources: ThreeOp,
+      disasm: "xnor", "%rd, %rs1, %rs2",
+ semfunc: "&::mpact::sim::riscv::RV32::RiscVXnor";
+    // Count leading/trailing zero bits.
+ clz{: rs1 : rd},
+ resources: TwoOp,
+ disasm: "clz", "%rd, %rs1",
+ semfunc: "&::mpact::sim::riscv::RV32::RiscVClz";
+ ctz{: rs1 : rd},
+ resources: TwoOp,
+ disasm: "ctz", "%rd, %rs1",
+ semfunc: "&::mpact::sim::riscv::RV32::RiscVCtz";
+ // Count population
+ cpop{: rs1 : rd},
+ resources: TwoOp,
+ disasm: "cpop", "%rd, %rs1",
+ semfunc: "&::mpact::sim::riscv::RV32::RiscVCpop";
+ // Integer minimum/maximum.
+ max{: rs1, rs2 : rd},
+ resources: ThreeOp,
+ disasm: "max", "%rd, %rs1, %rs2",
+ semfunc: "&::mpact::sim::riscv::RV32::RiscVMax";
+ maxu{: rs1, rs2 : rd},
+ resources: ThreeOp,
+      disasm: "maxu", "%rd, %rs1, %rs2",
+ semfunc: "&::mpact::sim::riscv::RV32::RiscVMaxu";
+ min{: rs1, rs2 : rd},
+ resources: ThreeOp,
+ disasm: "min", "%rd, %rs1, %rs2",
+ semfunc: "&::mpact::sim::riscv::RV32::RiscVMin";
+ minu{: rs1, rs2 : rd},
+ resources: ThreeOp,
+      disasm: "minu", "%rd, %rs1, %rs2",
+ semfunc: "&::mpact::sim::riscv::RV32::RiscVMinu";
+ // Sign and zero extension.
+ sext_b{: rs1 : rd},
+ resources: TwoOp,
+ disasm: "sext.b", "%rd, %rs1",
+ semfunc: "&::mpact::sim::riscv::RV32::RiscVSextB";
+ sext_h{: rs1 : rd},
+ resources: TwoOp,
+ disasm: "sext.h", "%rd, %rs1",
+ semfunc: "&::mpact::sim::riscv::RV32::RiscVSextH";
+ zext_h{: rs1 : rd},
+ resources: TwoOp,
+ disasm: "zext.h", "%rd, %rs1",
+ semfunc: "&::mpact::sim::riscv::RV32::RiscVZextH";
+ // Bitwise rotation.
+ rol{: rs1, rs2 : rd},
+ resources: ThreeOp,
+ disasm: "rol", "%rd, %rs1, %rs2",
+ semfunc: "&::mpact::sim::riscv::RV32::RiscVRol";
+ ror{: rs1, rs2 : rd},
+ resources: ThreeOp,
+ disasm: "ror", "%rd, %rs1, %rs2",
+ semfunc: "&::mpact::sim::riscv::RV32::RiscVRor";
+ // OR combine.
+ orcb{: rs1 : rd},
+ resources: TwoOp,
+      disasm: "orc.b", "%rd, %rs1",
+ semfunc: "&::mpact::sim::riscv::RV32::RiscVOrcb";
+ // Byte reverse.
+ rev8{: rs1 : rd},
+ resources: TwoOp,
+ disasm: "rev8", "%rd, %rs1",
+ semfunc: "&::mpact::sim::riscv::RV32::RiscVRev8";
+ }
+}
+
+slot riscv32_zbb_imm {
+ default size = 4;
+ resources TwoOp = { next_pc, rs1 : rd[..rd]};
+ resources ThreeOp = { next_pc, rs1, rs2 : rd[..rd]};
+ opcodes {
+ rori{: rs1, r_uimm5 : rd},
+ resources: TwoOp,
+ disasm: "rori", "%rd, %rs1, %r_uimm5",
+ semfunc: "&::mpact::sim::riscv::RV32::RiscVRor";
+ }
+}
+
+slot riscv_zve32x {
+ includes {
+ #include "external/com_google_mpact-riscv/riscv/riscv_vector_memory_instructions.h"
+ #include "external/com_google_mpact-riscv/riscv/riscv_vector_opi_instructions.h"
+ #include "external/com_google_mpact-riscv/riscv/riscv_vector_opm_instructions.h"
+ #include "external/com_google_mpact-riscv/riscv/riscv_vector_permute_instructions.h"
+ #include "external/com_google_mpact-riscv/riscv/riscv_vector_reduction_instructions.h"
+ #include "external/com_google_mpact-riscv/riscv/riscv_vector_unary_instructions.h"
+ #include "absl/functional/bind_front.h"
+ }
+ default size = 4;
+ default latency = 0;
+ default opcode =
+ disasm: "Unimplemented instruction at 0x%(@:08x)",
+ semfunc: "&RV32VUnimplementedInstruction";
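+  // Note: the default opcode above appears to act as the fallback for vector
+  // encodings that the bin_fmt decoder does not match, reporting an
+  // unimplemented instruction at the offending PC.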
+ opcodes {
+ // Configuration.
+ vsetvli_xn{: rs1, zimm11: rd},
+      disasm: "vsetvli", "%rd, %rs1, %zimm11",
+ semfunc: "absl::bind_front(&::mpact::sim::riscv::Vsetvl, /*rd_zero*/ false, /*rs1_zero*/ false)";
+ vsetvli_nz{: rs1, zimm11: rd},
+ disasm: "vsetvli", "%rd, %rs1, %zimm11",
+ semfunc: "absl::bind_front(&::mpact::sim::riscv::Vsetvl, /*rd_zero*/false, /*rs1_zero*/ true)";
+ vsetvli_zz{: rs1, zimm11: rd},
+ disasm: "vsetvli", "%rd, %rs1, %zimm11",
+ semfunc: "absl::bind_front(&::mpact::sim::riscv::Vsetvl, /*rd_zero*/true, /*rs1_zero*/ true)";
+ vsetivli{: uimm5, zimm10: rd},
+      disasm: "vsetivli", "%rd, %uimm5, %zimm10",
+ semfunc: "absl::bind_front(&::mpact::sim::riscv::Vsetvl, /*rd_zero*/false, /*rs1_zero*/ false)";
+ vsetvl_xn{: rs1, rs2: rd},
+ disasm: "vsetvl", "%rd, %rs1, %rs2",
+ semfunc: "absl::bind_front(&::mpact::sim::riscv::Vsetvl, /*rd_zero*/false, /*rs1_zero*/ false)";
+ vsetvl_nz{: rs1, rs2: rd},
+ disasm: "vsetvl", "%rd, %rs1, %rs2",
+ semfunc: "absl::bind_front(&::mpact::sim::riscv::Vsetvl, /*rd_zero*/false, /*rs1_zero*/ true)";
+ vsetvl_zz{: rs1, rs2: rd},
+ disasm: "vsetvl", "%rd, %rs1, %rs2",
+ semfunc: "absl::bind_front(&::mpact::sim::riscv::Vsetvl, /*rd_zero*/true, /*rs1_zero*/ true)";
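+    // Note: the _xn/_nz/_zz suffixes mirror the rd_zero/rs1_zero flags bound
+    // into Vsetvl above, i.e. they select the vset* behavior for rd and/or
+    // rs1 being x0.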
+
+ // VECTOR LOADS
+
+ // Unit stride loads, masked (vm=0)
+ vle8{(: rs1, const1, vmask :), (: : vd )},
+ disasm: "vle8.v", "%vd, (%rs1), %vmask",
+ semfunc: "absl::bind_front(&::mpact::sim::riscv::VlStrided, /*element_width*/ 1)",
+ "&::mpact::sim::riscv::VlChild";
+ vle16{(: rs1, const2, vmask :), (: : vd )},
+ disasm: "vle16.v", "%vd, (%rs1), %vmask",
+ semfunc: "absl::bind_front(&::mpact::sim::riscv::VlStrided, /*element_width*/ 2)",
+ "&::mpact::sim::riscv::VlChild";
+ vle32{(: rs1, const4, vmask :), ( : : vd) },
+ disasm: "vle32.v", "%vd, (%rs1), %vmask",
+ semfunc: "absl::bind_front(&::mpact::sim::riscv::VlStrided, /*element_width*/ 4)",
+ "&::mpact::sim::riscv::VlChild";
+
+ // Unit stride loads, unmasked (vm=1)
+ vle8_vm1{(: rs1, const1, vmask_true :), (: : vd )},
+ disasm: "vle8.v", "%vd, (%rs1)",
+ semfunc: "absl::bind_front(&::mpact::sim::riscv::VlStrided, /*element_width*/ 1)",
+ "&::mpact::sim::riscv::VlChild";
+ vle16_vm1{(: rs1, const2, vmask_true :), (: : vd )},
+ disasm: "vle16.v", "%vd, (%rs1)",
+ semfunc: "absl::bind_front(&::mpact::sim::riscv::VlStrided, /*element_width*/ 2)",
+ "&::mpact::sim::riscv::VlChild";
+ vle32_vm1{(: rs1, const4, vmask_true :), ( : : vd) },
+ disasm: "vle32.v", "%vd, (%rs1)",
+ semfunc: "absl::bind_front(&::mpact::sim::riscv::VlStrided, /*element_width*/ 4)",
+ "&::mpact::sim::riscv::VlChild";
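+    // Note: the _vm1 variants bind vmask_true instead of vmask, so the vm=1
+    // (unmasked) encodings appear to reuse the same VlStrided semantic
+    // function with an all-true mask.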
+
+ // Vector strided loads
+ vlse8{(: rs1, rs2, vmask :), (: : vd)},
+ disasm: "vlse8.v", "%vd, (%rs1), %rs2, %vmask",
+ semfunc: "absl::bind_front(&::mpact::sim::riscv::VlStrided, /*element_width*/ 1)",
+ "&::mpact::sim::riscv::VlChild";
+ vlse16{(: rs1, rs2, vmask :), (: : vd)},
+ disasm: "vlse16.v", "%vd, (%rs1), %rs2, %vmask",
+ semfunc: "absl::bind_front(&::mpact::sim::riscv::VlStrided, /*element_width*/ 2)",
+ "&::mpact::sim::riscv::VlChild";
+ vlse32{(: rs1, rs2, vmask :), (: : vd)},
+ disasm: "vlse32.v", "%vd, (%rs1), %rs2, %vmask",
+ semfunc: "absl::bind_front(&::mpact::sim::riscv::VlStrided, /*element_width*/ 4)",
+ "&::mpact::sim::riscv::VlChild";
+
+ // Vector mask load
+ vlm{(: rs1, const1, vmask_true :), (: : vd)},
+ disasm: "vlm.v", "%vd, (%rs1)",
+ semfunc: "&::mpact::sim::riscv::Vlm", "&::mpact::sim::riscv::VlChild";
+
+ // Unit stride vector load, fault first
+ vle8ff{(: rs1, const1, vmask:), (: : vd)},
+ disasm: "vle8ff.v", "%vd, (%rs1), %vmask",
+ semfunc: "absl::bind_front(&::mpact::sim::riscv::VlStrided, /*element_width*/ 1)",
+ "&::mpact::sim::riscv::VlChild";
+ vle16ff{(: rs1, const2, vmask:), (: : vd)},
+ disasm: "vle16ff.v", "%vd, (%rs1), %vmask",
+ semfunc: "absl::bind_front(&::mpact::sim::riscv::VlStrided, /*element_width*/ 2)",
+ "&::mpact::sim::riscv::VlChild";
+ vle32ff{(: rs1, const4, vmask:), (: : vd)},
+ disasm: "vle32ff.v", "%vd, (%rs1), %vmask",
+ semfunc: "absl::bind_front(&::mpact::sim::riscv::VlStrided, /*element_width*/ 4)",
+ "&::mpact::sim::riscv::VlChild";
+
+ // Vector register load
+ vl1re8{(: rs1 :), (: : vd)},
+ disasm: "vl1re8.v", "%vd, (%rs1)",
+ semfunc: "absl::bind_front(&::mpact::sim::riscv::VlRegister, /*num_regs*/ 1, /*element_width*/ 1)",
+ "&::mpact::sim::riscv::VlChild";
+ vl1re16{(: rs1 :), (: : vd)},
+ disasm: "vl1re16.v", "%vd, (%rs1)",
+ semfunc: "absl::bind_front(&::mpact::sim::riscv::VlRegister, /*num_regs*/ 1, /*element_width*/ 2)",
+ "&::mpact::sim::riscv::VlChild";
+ vl1re32{(: rs1 :), (: : vd)},
+ disasm: "vl1re32.v", "%vd, (%rs1)",
+ semfunc: "absl::bind_front(&::mpact::sim::riscv::VlRegister, /*num_regs*/ 1, /*element_width*/ 4)",
+ "&::mpact::sim::riscv::VlChild";
+ vl2re8{(: rs1 :), (: : vd)},
+ disasm: "vl2re8.v", "%vd, (%rs1)",
+ semfunc: "absl::bind_front(&::mpact::sim::riscv::VlRegister, /*num_regs*/ 2, /*element_width*/ 1)",
+ "&::mpact::sim::riscv::VlChild";
+ vl2re16{(: rs1 :), (: : vd)},
+ disasm: "vl2re16.v", "%vd, (%rs1)",
+ semfunc: "absl::bind_front(&::mpact::sim::riscv::VlRegister, /*num_regs*/ 2, /*element_width*/ 2)",
+ "&::mpact::sim::riscv::VlChild";
+ vl2re32{(: rs1 :), (: : vd)},
+ disasm: "vl2re32.v", "%vd, (%rs1)",
+ semfunc: "absl::bind_front(&::mpact::sim::riscv::VlRegister, /*num_regs*/ 2, /*element_width*/ 4)",
+ "&::mpact::sim::riscv::VlChild";
+ vl4re8{(: rs1 :), (: : vd)},
+ disasm: "vl4re8.v", "%vd, (%rs1)",
+ semfunc: "absl::bind_front(&::mpact::sim::riscv::VlRegister, /*num_regs*/ 4, /*element_width*/ 1)",
+ "&::mpact::sim::riscv::VlChild";
+ vl4re16{(: rs1 :), (: : vd)},
+ disasm: "vl4re16.v", "%vd, (%rs1)",
+ semfunc: "absl::bind_front(&::mpact::sim::riscv::VlRegister, /*num_regs*/ 4, /*element_width*/ 2)",
+ "&::mpact::sim::riscv::VlChild";
+ vl4re32{(: rs1 :), (: : vd)},
+ disasm: "vl4re32.v", "%vd, (%rs1)",
+ semfunc: "absl::bind_front(&::mpact::sim::riscv::VlRegister, /*num_regs*/ 4, /*element_width*/ 4)",
+ "&::mpact::sim::riscv::VlChild";
+ vl8re8{(: rs1 :), (: : vd)},
+ disasm: "vl8re8.v", "%vd, (%rs1)",
+ semfunc: "absl::bind_front(&::mpact::sim::riscv::VlRegister, /*num_regs*/ 8, /*element_width*/ 1)",
+ "&::mpact::sim::riscv::VlChild";
+ vl8re16{(: rs1 :), (: : vd)},
+ disasm: "vl8re16.v", "%vd, (%rs1)",
+ semfunc: "absl::bind_front(&::mpact::sim::riscv::VlRegister, /*num_regs*/ 8, /*element_width*/ 2)",
+ "&::mpact::sim::riscv::VlChild";
+ vl8re32{(: rs1 :), (: : vd)},
+ disasm: "vl8re32.v", "%vd, (%rs1)",
+ semfunc: "absl::bind_front(&::mpact::sim::riscv::VlRegister, /*num_regs*/ 8, /*element_width*/ 4)",
+ "&::mpact::sim::riscv::VlChild";
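+    // Note: the vl<N>re<EW> forms above are whole-register loads; the bound
+    // num_regs/element_width arguments appear to fix the register count and
+    // element size independent of the current vtype setting.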
+
+ // Vector load, indexed, unordered.
+ vluxei8{(: rs1, vs2, vmask:), (: : vd)},
+ disasm: "vluxei8.v", "%vd, (%rs1), %vs2, %vmask",
+ semfunc: "absl::bind_front(&::mpact::sim::riscv::VlIndexed, /*index_width*/ 1)",
+ "&::mpact::sim::riscv::VlChild";
+ vluxei16{(: rs1, vs2, vmask:), (: : vd)},
+ disasm: "vluxei16.v", "%vd, (%rs1), %vs2, %vmask",
+ semfunc: "absl::bind_front(&::mpact::sim::riscv::VlIndexed, /*index_width*/ 2)",
+ "&::mpact::sim::riscv::VlChild";
+ vluxei32{(: rs1, vs2, vmask:), (: : vd)},
+ disasm: "vluxei32.v", "%vd, (%rs1), %vs2, %vmask",
+ semfunc: "absl::bind_front(&::mpact::sim::riscv::VlIndexed, /*index_width*/ 4)",
+ "&::mpact::sim::riscv::VlChild";
+
+ // Vector load, indexed, ordered.
+ vloxei8{(: rs1, vs2, vmask:), (: : vd)},
+ disasm: "vloxei8.v", "%vd, (%rs1), %vs2, %vmask",
+ semfunc: "absl::bind_front(&::mpact::sim::riscv::VlIndexed, /*index_width*/ 1)",
+ "&::mpact::sim::riscv::VlChild";
+ vloxei16{(: rs1, vs2, vmask:), (: : vd)},
+ disasm: "vloxei16.v", "%vd, (%rs1), %vs2, %vmask",
+ semfunc: "absl::bind_front(&::mpact::sim::riscv::VlIndexed, /*index_width*/ 2)",
+ "&::mpact::sim::riscv::VlChild";
+ vloxei32{(: rs1, vs2, vmask:), (: : vd)},
+ disasm: "vloxei32.v", "%vd, (%rs1), %vs2, %vmask",
+ semfunc: "absl::bind_front(&::mpact::sim::riscv::VlIndexed, /*index_width*/ 4)",
+ "&::mpact::sim::riscv::VlChild";
+
+ // Vector unit-stride segment load
+ vlsege8{(: rs1, vmask, nf:), (: nf : vd)},
+      disasm: "vlseg%nf\\e8.v", "%vd, (%rs1), %vmask",
+ semfunc: "absl::bind_front(&::mpact::sim::riscv::VlSegment, /*element_width*/ 1)",
+ "absl::bind_front(&::mpact::sim::riscv::VlSegmentChild, /*element_width*/ 1)";
+ vlsege16{(: rs1, vmask, nf:), (: nf : vd)},
+      disasm: "vlseg%nf\\e16.v", "%vd, (%rs1), %vmask",
+ semfunc: "absl::bind_front(&::mpact::sim::riscv::VlSegment, /*element_width*/ 2)",
+ "absl::bind_front(&::mpact::sim::riscv::VlSegmentChild, /*element_width*/ 2)";
+ vlsege32{(: rs1, vmask, nf:), (: nf : vd)},
+      disasm: "vlseg%nf\\e32.v", "%vd, (%rs1), %vmask",
+ semfunc: "absl::bind_front(&::mpact::sim::riscv::VlSegment, /*element_width*/ 4)",
+ "absl::bind_front(&::mpact::sim::riscv::VlSegmentChild, /*element_width*/ 4)";
+
+ // Vector strided segment load.
+ vlssege8{(: rs1, rs2, vmask, nf: ), (: nf : vd)},
+ disasm: "vlsseg%nf\\e8.v", "%vd, (%rs1), %rs2, %vmask",
+ semfunc: "absl::bind_front(&::mpact::sim::riscv::VlSegmentStrided, /*element_width*/ 1)",
+ "absl::bind_front(&::mpact::sim::riscv::VlSegmentChild, /*element_width*/ 1)";
+    vlssege16{(: rs1, rs2, vmask, nf: ), (: nf : vd)},
+ disasm: "vlsseg%nf\\e16.v", "%vd, (%rs1), %rs2, %vmask",
+ semfunc: "absl::bind_front(&::mpact::sim::riscv::VlSegmentStrided, /*element_width*/ 2)",
+ "absl::bind_front(&::mpact::sim::riscv::VlSegmentChild, /*element_width*/ 2)";
+ vlssege32{(: rs1, rs2, vmask, nf: ), (: nf : vd)},
+ disasm: "vlsseg%nf\\e32.v", "%vd, (%rs1), %rs2, %vmask",
+ semfunc: "absl::bind_front(&::mpact::sim::riscv::VlSegmentStrided, /*element_width*/ 4)",
+ "absl::bind_front(&::mpact::sim::riscv::VlSegmentChild, /*element_width*/ 4)";
+
+ // Vector indexed segment load unordered.
+ vluxsegei8{(: rs1, vs2, vmask, nf :), (: nf : vd)},
+      disasm: "vluxseg%nf\\ei8.v", "%vd, (%rs1), %vs2, %vmask",
+ semfunc: "absl::bind_front(&::mpact::sim::riscv::VlSegmentIndexed, /*index_width*/ 1)",
+ "absl::bind_front(&::mpact::sim::riscv::VlSegmentChild, /*element_width*/ 1)";
+ vluxsegei16{(: rs1, vs2, vmask, nf :), (: nf : vd)},
+      disasm: "vluxseg%nf\\ei16.v", "%vd, (%rs1), %vs2, %vmask",
+ semfunc: "absl::bind_front(&::mpact::sim::riscv::VlSegmentIndexed, /*index_width*/ 2)",
+ "absl::bind_front(&::mpact::sim::riscv::VlSegmentChild, /*element_width*/ 2)";
+ vluxsegei32{(: rs1, vs2, vmask, nf :), (: nf : vd)},
+      disasm: "vluxseg%nf\\ei32.v", "%vd, (%rs1), %vs2, %vmask",
+ semfunc: "absl::bind_front(&::mpact::sim::riscv::VlSegmentIndexed, /*index_width*/ 4)",
+ "absl::bind_front(&::mpact::sim::riscv::VlSegmentChild, /*element_width*/ 4)";
+
+ // Vector indexed segment load ordered.
+
+ vloxsegei8{(: rs1, vs2, vmask, nf :), (: nf : vd)},
+      disasm: "vloxseg%nf\\ei8.v", "%vd, (%rs1), %vs2, %vmask",
+ semfunc: "absl::bind_front(&::mpact::sim::riscv::VlSegmentIndexed, /*index_width*/ 1)",
+ "absl::bind_front(&::mpact::sim::riscv::VlSegmentChild, /*element_width*/ 1)";
+ vloxsegei16{(: rs1, vs2, vmask, nf :), (: nf : vd)},
+      disasm: "vloxseg%nf\\ei16.v", "%vd, (%rs1), %vs2, %vmask",
+ semfunc: "absl::bind_front(&::mpact::sim::riscv::VlSegmentIndexed, /*index_width*/ 2)",
+ "absl::bind_front(&::mpact::sim::riscv::VlSegmentChild, /*element_width*/ 2)";
+ vloxsegei32{(: rs1, vs2, vmask, nf :), (: nf : vd)},
+      disasm: "vloxseg%nf\\ei32.v", "%vd, (%rs1), %vs2, %vmask",
+ semfunc: "absl::bind_front(&::mpact::sim::riscv::VlSegmentIndexed, /*index_width*/ 4)",
+ "absl::bind_front(&::mpact::sim::riscv::VlSegmentChild, /*element_width*/ 4)";
+
+ // VECTOR STORES
+
+ // Vector store, unit stride.
+ vse8{: vs3, rs1, const1, vmask : },
+ disasm: "vse8.v", "%vs3, (%rs1), %vmask",
+ semfunc: "absl::bind_front(&::mpact::sim::riscv::VsStrided, /*element_width*/ 1)";
+ vse16{: vs3, rs1, const2, vmask : },
+ disasm: "vse16.v", "%vs3, (%rs1), %vmask",
+ semfunc: "absl::bind_front(&::mpact::sim::riscv::VsStrided, /*element_width*/ 2)";
+ vse32{: vs3, rs1, const4, vmask : },
+ disasm: "vse32.v", "%vs3, (%rs1), %vmask",
+ semfunc: "absl::bind_front(&::mpact::sim::riscv::VsStrided, /*element_width*/ 4)";
+
+ // Vector store mask
+ vsm{: vs3, rs1, const1, vmask_true:},
+      disasm: "vsm.v", "%vs3, (%rs1)",
+ semfunc: "absl::bind_front(&::mpact::sim::riscv::Vsm)";
+
+ // Vector store, unit stride, fault first.
+ vse8ff{: vs3, rs1, const1, vmask:},
+ disasm: "vse8ff.v", "%vs3, (%rs1), %vmask",
+ semfunc: "absl::bind_front(&::mpact::sim::riscv::VsStrided, /*element_width*/ 1)";
+ vse16ff{: vs3, rs1, const2, vmask:},
+ disasm: "vse16ff.v", "%vs3, (%rs1), %vmask",
+ semfunc: "absl::bind_front(&::mpact::sim::riscv::VsStrided, /*element_width*/ 2)";
+ vse32ff{: vs3, rs1, const4, vmask:},
+ disasm: "vse32ff.v", "%vs3, (%rs1), %vmask",
+ semfunc: "absl::bind_front(&::mpact::sim::riscv::VsStrided, /*element_width*/ 4)";
+
+ // Vector store register.
+ vs1re8{(: vs3, rs1 :)},
+ disasm: "vs1re8.v", "%vs3, (%rs1)",
+ semfunc: "absl::bind_front(&::mpact::sim::riscv::VsRegister, /*num_regs*/ 1)";
+ vs1re16{(: vs3, rs1 :)},
+ disasm: "vs1re16.v", "%vs3, (%rs1)",
+ semfunc: "absl::bind_front(&::mpact::sim::riscv::VsRegister, /*num_regs*/ 1)";
+ vs1re32{(: vs3, rs1 :)},
+ disasm: "vs1re32.v", "%vs3, (%rs1)",
+ semfunc: "absl::bind_front(&::mpact::sim::riscv::VsRegister, /*num_regs*/ 1)";
+ vs2re8{(: vs3, rs1 :)},
+ disasm: "vs2re8.v", "%vs3, (%rs1)",
+ semfunc: "absl::bind_front(&::mpact::sim::riscv::VsRegister, /*num_regs*/ 2)";
+ vs2re16{(: vs3, rs1 :)},
+ disasm: "vs2re16.v", "%vs3, (%rs1)",
+ semfunc: "absl::bind_front(&::mpact::sim::riscv::VsRegister, /*num_regs*/ 2)";
+ vs2re32{(: vs3, rs1 :)},
+ disasm: "vs2re32.v", "%vs3, (%rs1)",
+ semfunc: "absl::bind_front(&::mpact::sim::riscv::VsRegister, /*num_regs*/ 2)";
+ vs4re8{(: vs3, rs1 :)},
+ disasm: "vs4re8.v", "%vs3, (%rs1)",
+ semfunc: "absl::bind_front(&::mpact::sim::riscv::VsRegister, /*num_regs*/ 4)";
+ vs4re16{(: vs3, rs1 :)},
+ disasm: "vs4re16.v", "%vs3, (%rs1)",
+ semfunc: "absl::bind_front(&::mpact::sim::riscv::VsRegister, /*num_regs*/ 4)";
+ vs4re32{(: vs3, rs1 :)},
+ disasm: "vs4re32.v", "%vs3, (%rs1)",
+ semfunc: "absl::bind_front(&::mpact::sim::riscv::VsRegister, /*num_regs*/ 4)";
+ vs8re8{(: vs3, rs1 :)},
+ disasm: "vs8re8.v", "%vs3, (%rs1)",
+ semfunc: "absl::bind_front(&::mpact::sim::riscv::VsRegister, /*num_regs*/8)";
+ vs8re16{(: vs3, rs1 :)},
+ disasm: "vs8re16.v", "%vs3, (%rs1)",
+ semfunc: "absl::bind_front(&::mpact::sim::riscv::VsRegister, /*num_regs*/8)";
+ vs8re32{(: vs3, rs1 :)},
+ disasm: "vs8re32.v", "%vs3, (%rs1)",
+ semfunc: "absl::bind_front(&::mpact::sim::riscv::VsRegister, /*num_regs*/8)";
+
+ // Vector store, strided.
+ vsse8{: vs3, rs1, rs2, vmask : },
+ disasm: "vsse8.v", "%vs3, (%rs1), %rs2, %vmask",
+ semfunc: "absl::bind_front(&::mpact::sim::riscv::VsStrided, /*element_width*/ 1)";
+ vsse16{: vs3, rs1, rs2, vmask : },
+ disasm: "vsse16.v", "%vs3, (%rs1), %rs2, %vmask",
+ semfunc: "absl::bind_front(&::mpact::sim::riscv::VsStrided, /*element_width*/ 2)";
+ vsse32{: vs3, rs1, rs2, vmask : },
+ disasm: "vsse32.v", "%vs3, (%rs1), %rs2, %vmask",
+ semfunc: "absl::bind_front(&::mpact::sim::riscv::VsStrided, /*element_width*/ 4)";
+
+ // Vector store, indexed, unordered.
+ vsuxei8{: vs3, rs1, vs2, vmask: },
+ disasm: "vsuxei8", "%vs3, (%rs1), %vs2, %vmask",
+ semfunc: "absl::bind_front(&::mpact::sim::riscv::VsIndexed, /*index_width*/ 1)";
+ vsuxei16{: vs3, rs1, vs2, vmask:},
+ disasm: "vsuxei16", "%vs3, (%rs1), %vs2, %vmask",
+ semfunc: "absl::bind_front(&::mpact::sim::riscv::VsIndexed, /*index_width*/ 2)";
+ vsuxei32{: vs3, rs1, vs2, vmask:},
+ disasm: "vsuxei32", "%vs3, (%rs1), %vs2, %vmask",
+ semfunc: "absl::bind_front(&::mpact::sim::riscv::VsIndexed, /*index_width*/ 4)";
+
+    // Vector store, indexed, ordered.
+ vsoxei8{: vs3, rs1, vs2, vmask:},
+ disasm: "vsoxei8", "%vs3, (%rs1), %vs2, %vmask",
+ semfunc: "absl::bind_front(&::mpact::sim::riscv::VsIndexed, /*index_width*/ 1)";
+ vsoxei16{: vs3, rs1, vs2, vmask:},
+ disasm: "vsoxei16", "%vs3, (%rs1), %vs2, %vmask",
+ semfunc: "absl::bind_front(&::mpact::sim::riscv::VsIndexed, /*index_width*/ 2)";
+ vsoxei32{: vs3, rs1, vs2, vmask:},
+ disasm: "vsoxei32", "%vs3, (%rs1), %vs2, %vmask",
+ semfunc: "absl::bind_front(&::mpact::sim::riscv::VsIndexed, /*index_width*/ 4)";
+
+ // Vector unit-stride segment store.
+ vssege8{(: vs3, rs1, vmask, nf:)},
+      disasm: "vsseg%nf\\e8.v", "%vs3, (%rs1), %vmask",
+ semfunc: "absl::bind_front(&::mpact::sim::riscv::VsSegment, /*element_width*/ 1)";
+ vssege16{(: vs3, rs1, vmask, nf:)},
+      disasm: "vsseg%nf\\e16.v", "%vs3, (%rs1), %vmask",
+ semfunc: "absl::bind_front(&::mpact::sim::riscv::VsSegment, /*element_width*/ 2)";
+ vssege32{(: vs3, rs1, vmask, nf:)},
+      disasm: "vsseg%nf\\e32.v", "%vs3, (%rs1), %vmask",
+ semfunc: "absl::bind_front(&::mpact::sim::riscv::VsSegment, /*element_width*/ 4)";
+
+ // Vector strided segment store.
+ vsssege8{(: vs3, rs1, rs2, vmask, nf: )},
+ disasm: "vssseg%nf\\e8.v", "%vs3, (%rs1), %rs2, %vmask",
+ semfunc: "absl::bind_front(&::mpact::sim::riscv::VsSegmentStrided, /*element_width*/ 1)";
+ vsssege16{(: vs3, rs1, rs2, vmask, nf: )},
+ disasm: "vssseg%nf\\e16.v", "%vs3, (%rs1), %rs2, %vmask",
+ semfunc: "absl::bind_front(&::mpact::sim::riscv::VsSegmentStrided, /*element_width*/ 2)";
+ vsssege32{(: vs3, rs1, rs2, vmask, nf: )},
+ disasm: "vssseg%nf\\e32.v", "%vs3, (%rs1), %rs2, %vmask",
+ semfunc: "absl::bind_front(&::mpact::sim::riscv::VsSegmentStrided, /*element_width*/ 4)";
+
+ // Vector indexed segment store unordered.
+ vsuxsegei8{(: vs3, rs1, vs2, vmask, nf :)},
+      disasm: "vsuxseg%nf\\ei8.v", "%vs3, (%rs1), %vs2, %vmask",
+      semfunc: "absl::bind_front(&::mpact::sim::riscv::VsSegmentIndexed, /*index_width*/ 1)";
+ vsuxsegei16{(: vs3, rs1, vs2, vmask, nf :)},
+      disasm: "vsuxseg%nf\\ei16.v", "%vs3, (%rs1), %vs2, %vmask",
+      semfunc: "absl::bind_front(&::mpact::sim::riscv::VsSegmentIndexed, /*index_width*/ 2)";
+ vsuxsegei32{(: vs3, rs1, vs2, vmask, nf :)},
+      disasm: "vsuxseg%nf\\ei32.v", "%vs3, (%rs1), %vs2, %vmask",
+      semfunc: "absl::bind_front(&::mpact::sim::riscv::VsSegmentIndexed, /*index_width*/ 4)";
+
+ // Vector indexed segment store ordered.
+ vsoxsegei8{(: vs3, rs1, vs2, vmask, nf :)},
+      disasm: "vsoxseg%nf\\ei8.v", "%vs3, (%rs1), %vs2, %vmask",
+ semfunc: "absl::bind_front(&::mpact::sim::riscv::VsSegmentIndexed, /*index_width*/ 1)";
+ vsoxsegei16{(: vs3, rs1, vs2, vmask, nf :)},
+      disasm: "vsoxseg%nf\\ei16.v", "%vs3, (%rs1), %vs2, %vmask",
+ semfunc: "absl::bind_front(&::mpact::sim::riscv::VsSegmentIndexed, /*index_width*/ 2)";
+ vsoxsegei32{(: vs3, rs1, vs2, vmask, nf :)},
+      disasm: "vsoxseg%nf\\ei32.v", "%vs3, (%rs1), %vs2, %vmask",
+ semfunc: "absl::bind_front(&::mpact::sim::riscv::VsSegmentIndexed, /*index_width*/ 4)";
+
+ // Integer OPIVV, OPIVX, OPIVI.
+ vadd_vv{: vs2, vs1, vmask : vd},
+ disasm: "vadd.vv", "%vd, %vs2, %vs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vadd";
+ vadd_vx{: vs2, rs1, vmask : vd},
+ disasm: "vadd.vx", "%vd, %vs2, %rs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vadd";
+ vadd_vi{: vs2, simm5, vmask : vd},
+      disasm: "vadd.vi", "%vd, %vs2, %simm5, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vadd";
+ vsub_vv{: vs2, vs1, vmask : vd},
+ disasm: "vsub.vv", "%vd, %vs2, %vs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vsub";
+ vsub_vx{: vs2, rs1, vmask : vd},
+ disasm: "vsub.vx", "%vd, %vs2, %rs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vsub";
+ vrsub_vx{: vs2, rs1, vmask : vd},
+ disasm: "vrsub.vx", "%vd, %vs2, %rs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vrsub";
+    vrsub_vi{: vs2, simm5, vmask : vd},
+      disasm: "vrsub.vi", "%vd, %vs2, %simm5, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vrsub";
+ vminu_vv{: vs2, vs1, vmask : vd},
+ disasm: "vminu.vv", "%vd, %vs2, %vs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vminu";
+ vminu_vx{: vs2, rs1, vmask : vd},
+ disasm: "vminu.vx", "%vd, %vs2, %rs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vminu";
+ vmin_vv{: vs2, vs1, vmask : vd},
+ disasm: "vmin.vv", "%vd, %vs2, %vs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vmin";
+ vmin_vx{: vs2, rs1, vmask : vd},
+ disasm: "vmin.vx", "%vd, %vs2, %rs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vmin";
+ vmaxu_vv{: vs2, vs1, vmask : vd},
+      disasm: "vmaxu.vv", "%vd, %vs2, %vs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vmaxu";
+ vmaxu_vx{: vs2, rs1, vmask : vd},
+      disasm: "vmaxu.vx", "%vd, %vs2, %rs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vmaxu";
+ vmax_vv{: vs2, vs1, vmask : vd},
+ disasm: "vmax.vv", "%vd, %vs2, %vs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vmax";
+ vmax_vx{: vs2, rs1, vmask : vd},
+ disasm: "vmax.vx", "%vd, %vs2, %rs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vmax";
+ vand_vv{: vs2, vs1, vmask : vd},
+ disasm: "vand.vv", "%vd, %vs2, %vs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vand";
+ vand_vx{: vs2, rs1, vmask : vd},
+ disasm: "vand.vx", "%vd, %vs2, %rs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vand";
+ vand_vi{: vs2, simm5, vmask : vd},
+      disasm: "vand.vi", "%vd, %vs2, %simm5, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vand";
+ vor_vv{: vs2, vs1, vmask : vd},
+ disasm: "vor.vv", "%vd, %vs2, %vs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vor";
+ vor_vx{: vs2, rs1, vmask : vd},
+ disasm: "vor.vx", "%vd, %vs2, %rs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vor";
+ vor_vi{: vs2, simm5, vmask : vd},
+      disasm: "vor.vi", "%vd, %vs2, %simm5, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vor";
+ vxor_vv{: vs2, vs1, vmask : vd},
+ disasm: "vxor.vv", "%vd, %vs2, %vs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vxor";
+ vxor_vx{: vs2, rs1, vmask : vd},
+ disasm: "vxor.vx", "%vd, %vs2, %rs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vxor";
+ vxor_vi{: vs2, simm5, vmask : vd},
+      disasm: "vxor.vi", "%vd, %vs2, %simm5, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vxor";
+ vrgather_vv{: vs2, vs1, vmask: vd},
+ disasm: "vrgather.vv", "%vd, %vs2, %vs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vrgather";
+ vrgather_vx{: vs2, rs1, vmask: vd},
+ disasm: "vrgather.vx", "%vd, %vs2, %rs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vrgather";
+ vrgather_vi{: vs2, uimm5, vmask: vd},
+      disasm: "vrgather.vi", "%vd, %vs2, %uimm5, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vrgather";
+ vrgatherei16_vv{: vs2, vs1, vmask: vd},
+ disasm: "vrgatherei16.vv", "%vd, %vs2, %vs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vrgatherei16";
+ vslideup_vx{: vs2, rs1, vmask: vd},
+ disasm: "vslideup.vx", "%vd, %vs2, %rs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vslideup";
+ vslideup_vi{: vs2, uimm5, vmask: vd},
+ disasm: "vslideup.vi", "%vd, %vs2, %uimm5, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vslideup";
+ vslidedown_vx{: vs2, rs1, vmask: vd},
+ disasm: "vslidedown.vx", "%vd, %vs2, %rs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vslidedown";
+ vslidedown_vi{: vs2, uimm5, vmask: vd},
+ disasm: "vslidedown.vi", "%vd, %vs2, %uimm5, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vslidedown";
+ vadc_vv{: vs2, vs1, vmask: vd},
+ disasm: "vadc.vv", "%vd, %vs2, %vs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vadc";
+ vadc_vx{: vs2, rs1, vmask: vd},
+ disasm: "vadc.vx", "%vd, %vs2, %rs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vadc";
+ vadc_vi{: vs2, simm5, vmask: vd},
+ disasm: "vadc.vi", "%vd, %vs2, %simm5, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vadc";
+ vmadc_vv{: vs2, vs1, vmask, vm: vd},
+      disasm: "vmadc.vv", "%vd, %vs2, %vs1, %vmask, %vm",
+ semfunc: "&::mpact::sim::riscv::Vmadc";
+ vmadc_vx{: vs2, rs1, vmask, vm: vd},
+ disasm: "vmadc.vx", "%vd, %vs2, %rs1, %vmask, %vm",
+ semfunc: "&::mpact::sim::riscv::Vmadc";
+ vmadc_vi{: vs2, simm5, vmask, vm: vd},
+ disasm: "vmadc.vi", "%vd, %vs2, %simm5, %vmask, %vm",
+ semfunc: "&::mpact::sim::riscv::Vmadc";
+ vsbc_vv{: vs2, vs1, vmask: vd},
+ disasm: "vsbc.vv", "%vd, %vs2, %vs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vsbc";
+ vsbc_vx{: vs2, rs1, vmask: vd},
+ disasm: "vsbc.vx", "%vd, %vs2, %rs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vsbc";
+ vmsbc_vv{: vs2, vs1, vmask, vm: vd},
+ disasm: "vmsbc.vv", "%vd, %vs2, %vs1, %vmask, %vm",
+ semfunc: "&::mpact::sim::riscv::Vmsbc";
+ vmsbc_vx{: vs2, rs1, vmask, vm: vd},
+ disasm: "vmsbc.vx", "%vd, %vs2, %rs1, %vmask, %vm",
+ semfunc: "&::mpact::sim::riscv::Vmsbc";
+ vmerge_vv{: vs2, vs1, vmask: vd},
+ disasm: "vmerge.vv", "%vd, %vs2, %vs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vmerge";
+ vmerge_vx{: vs2, rs1, vmask: vd},
+ disasm: "vmerge.vx", "%vd, %vs2, %rs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vmerge";
+ vmerge_vi{: vs2, simm5, vmask: vd},
+ disasm: "vmerge.vi", "%vd, %vs2, %simm5, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vmerge";
+ vmv_vv{: vs2, vs1, vmask_true: vd},
+ disasm: "vmv.vv", "%vd, %vs1",
+ semfunc: "&::mpact::sim::riscv::Vmerge";
+ vmv_vx{: vs2, rs1, vmask_true: vd},
+ disasm: "vmv.vx", "%vd, %rs1",
+ semfunc: "&::mpact::sim::riscv::Vmerge";
+ vmv_vi{: vs2, simm5, vmask_true: vd},
+ disasm: "vmv.vi", "%vd, %simm5",
+ semfunc: "&::mpact::sim::riscv::Vmerge";
+ vmseq_vv{: vs2, vs1, vmask: vd},
+ disasm: "vmseq.vv", "%vd, %vs2, %vs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vmseq";
+ vmseq_vx{: vs2, rs1, vmask: vd},
+ disasm: "vmseq.vx", "%vd, %vs2, %rs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vmseq";
+ vmseq_vi{: vs2, simm5, vmask: vd},
+ disasm: "vmseq.vi", "%vd, %vs2, %simm5, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vmseq";
+ vmsne_vv{: vs2, vs1, vmask: vd},
+ disasm: "vmsne.vv", "%vd, %vs2, %vs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vmsne";
+ vmsne_vx{: vs2, rs1, vmask: vd},
+ disasm: "vmsne.vx", "%vd, %vs2, %rs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vmsne";
+ vmsne_vi{: vs2, simm5, vmask: vd},
+ disasm: "vmsne.vi", "%vd, %vs2, %simm5, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vmsne";
+ vmsltu_vv{: vs2, vs1, vmask: vd},
+ disasm: "vmsltu.vv", "%vd, %vs2, %vs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vmsltu";
+ vmsltu_vx{: vs2, rs1, vmask: vd},
+ disasm: "vmsltu.vx", "%vd, %vs2, %rs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vmsltu";
+ vmslt_vv{: vs2, vs1, vmask: vd},
+ disasm: "vmslt.vv", "%vd, %vs2, %vs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vmslt";
+ vmslt_vx{: vs2, rs1, vmask: vd},
+ disasm: "vmslt.vx", "%vd, %vs2, %rs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vmslt";
+ vmsleu_vv{: vs2, vs1, vmask: vd},
+ disasm: "vmsleu.vv", "%vd, %vs2, %vs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vmsleu";
+ vmsleu_vx{: vs2, rs1, vmask: vd},
+ disasm: "vmsleu.vx", "%vd, %vs2, %rs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vmsleu";
+ vmsleu_vi{: vs2, simm5, vmask: vd},
+ disasm: "vmsleu.vi", "%vd, %vs2, %simm5, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vmsleu";
+ vmsle_vv{: vs2, vs1, vmask: vd},
+ disasm: "vmsle.vv", "%vd, %vs2, %vs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vmsle";
+ vmsle_vx{: vs2, rs1, vmask: vd},
+ disasm: "vmsle.vx", "%vd, %vs2, %rs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vmsle";
+ vmsle_vi{: vs2, simm5, vmask: vd},
+ disasm: "vmsle.vi", "%vd, %vs2, %simm5, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vmsle";
+ vmsgtu_vx{: vs2, rs1, vmask: vd},
+ disasm: "vmsgtu.vx", "%vd, %vs2, %rs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vmsgtu";
+ vmsgtu_vi{: vs2, simm5, vmask: vd},
+ disasm: "vmsgtu.vi", "%vd, %vs2, %simm5, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vmsgtu";
+ vmsgt_vx{: vs2, rs1, vmask: vd},
+ disasm: "vmsgt.vx", "%vd, %vs2, %rs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vmsgt";
+ vmsgt_vi{: vs2, simm5, vmask: vd},
+ disasm: "vmsgt.vi", "%vd, %vs2, %simm5, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vmsgt";
+ vsaddu_vv{: vs2, vs1, vmask: vd},
+ disasm: "vsaddu.vv", "%vd, %vs2, %vs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vsaddu";
+ vsaddu_vx{: vs2, rs1, vmask: vd},
+ disasm: "vsaddu.vx", "%vd, %vs2, %rs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vsaddu";
+ vsaddu_vi{: vs2, simm5, vmask: vd},
+ disasm: "vsaddu.vi", "%vd, %vs2, %simm5, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vsaddu";
+ vsadd_vv{: vs2, vs1, vmask: vd},
+ disasm: "vsadd.vv", "%vd, %vs2, %vs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vsadd";
+ vsadd_vx{: vs2, rs1, vmask: vd},
+ disasm: "vsadd.vx", "%vd, %vs2, %rs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vsadd";
+ vsadd_vi{: vs2, simm5, vmask: vd},
+ disasm: "vsadd.vi", "%vd, %vs2, %simm5, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vsadd";
+ vssubu_vv{: vs2, vs1, vmask: vd},
+ disasm: "vssubu.vv", "%vd, %vs2, %vs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vssubu";
+ vssubu_vx{: vs2, rs1, vmask: vd},
+ disasm: "vssubu.vx", "%vd, %vs2, %rs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vssubu";
+ vssub_vv{: vs2, vs1, vmask: vd},
+ disasm: "vssub.vv", "%vd, %vs2, %vs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vssub";
+ vssub_vx{: vs2, rs1, vmask: vd},
+ disasm: "vssub.vx", "%vd, %vs2, %rs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vssub";
+ vsll_vv{: vs2, vs1, vmask : vd},
+ disasm: "vsll.vv", "%vd, %vs2, %vs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vsll";
+ vsll_vx{: vs2, rs1, vmask : vd},
+ disasm: "vsll.vx", "%vd, %vs2, %rs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vsll";
+ vsll_vi{: vs2, simm5, vmask: vd},
+ disasm: "vsll.vi", "%vd, %simm5, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vsll";
+ vsmul_vv{: vs2, vs1, vmask : vd},
+ disasm: "vsmul.vv", "%vd, %vs2, %vs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vsmul";
+ vsmul_vx{: vs2, rs1, vmask : vd},
+ disasm: "vsmul.vx", "%vd, %vs2, %rs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vsmul";
+ vmv1r_vi{: vs2 : vd},
+ disasm: "vmv1r.vi", "%vd, %vs2",
+ semfunc: "absl::bind_front(&::mpact::sim::riscv::Vmvr, 1)";
+ vmv2r_vi{: vs2 : vd},
+ disasm: "vmv2r.vi", "%vd, %vs2",
+ semfunc: "absl::bind_front(&::mpact::sim::riscv::Vmvr, 2)";
+ vmv4r_vi{: vs2 : vd},
+ disasm: "vmv4r.vi", "%vd, %vs2",
+ semfunc: "absl::bind_front(&::mpact::sim::riscv::Vmvr, 4)";
+ vmv8r_vi{: vs2 : vd},
+ disasm: "vmv8r.vi", "%vd, %vs2",
+ semfunc: "absl::bind_front(&::mpact::sim::riscv::Vmvr, 8)";
+ vsrl_vv{: vs2, vs1, vmask : vd},
+ disasm: "vsrl.vv", "%vd, %vs2, %vs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vsrl";
+ vsrl_vx{: vs2, rs1, vmask : vd},
+ disasm: "vsrl.vx", "%vd, %vs2, %rs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vsrl";
+ vsrl_vi{: vs2, simm5, vmask: vd},
+ disasm: "vsrl.vi", "%vd, %simm5, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vsrl";
+ vsra_vv{: vs2, vs1, vmask : vd},
+ disasm: "vsra.vv", "%vd, %vs2, %vs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vsra";
+ vsra_vx{: vs2, rs1, vmask : vd},
+ disasm: "vsra.vx", "%vd, %vs2, %rs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vsra";
+ vsra_vi{: vs2, simm5, vmask: vd},
+ disasm: "vsra.vi", "%vd, %simm5, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vsra";
+ vssrl_vv{: vs2, vs1, vmask: vd},
+ disasm: "vssrl.vv", "%vd, %vs2, %vs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vssrl";
+ vssrl_vx{: vs2, rs1, vmask: vd},
+ disasm: "vssrl.vx", "%vd, %vs2, %rs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vssrl";
+ vssrl_vi{: vs2, uimm5, vmask: vd},
+ disasm: "vssrl.vi", "%vd, %vs2, %uimm5, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vssrl";
+ vssra_vv{: vs2, vs1, vmask: vd},
+ disasm: "vssra.vv", "%vd, %vs2, %vs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vssra";
+ vssra_vx{: vs2, rs1, vmask: vd},
+ disasm: "vssra.vx", "%vd, %vs2, %rs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vssra";
+ vssra_vi{: vs2, uimm5, vmask: vd},
+ disasm: "vssra.vi", "%vd, %vs2, %uimm5, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vssra";
+ vnsrl_vv{: vs2, vs1, vmask : vd},
+ disasm: "vnsrl.vv", "%vd, %vs2, %vs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vnsrl";
+ vnsrl_vx{: vs2, rs1, vmask : vd},
+ disasm: "vnsrl.vx", "%vd, %vs2, %rs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vnsrl";
+ vnsrl_vi{: vs2, uimm5, vmask : vd},
+ disasm: "vnsrl.vi", "%vd, %vs2, %uimm5, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vnsrl";
+ vnsra_vv{: vs2, vs1, vmask : vd},
+ disasm: "vnsra.vv", "%vd, %vs2, %vs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vnsra";
+ vnsra_vx{: vs2, rs1, vmask : vd},
+ disasm: "vnsra.vx", "%vd, %vs2, %rs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vnsra";
+ vnsra_vi{: vs2, uimm5, vmask : vd},
+ disasm: "vnsra.vi", "%vd, %vs2, %uimm5, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vnsra";
+ vnclipu_vv{: vs2, vs1, vmask : vd},
+ disasm: "vnclipu_vv", "%vd, %vs2, %vs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vnclipu";
+ vnclipu_vx{: vs2, rs1, vmask : vd},
+ disasm: "vnclipu_vx", "%vd, %vs2, %rs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vnclipu";
+ vnclipu_vi{: vs2, uimm5, vmask : vd},
+ disasm: "vnclipu_vi", "%vd, %vs2, %uimm5, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vnclipu";
+ vnclip_vv{: vs2, vs1, vmask : vd},
+ disasm: "vnclip_vv", "%vd, %vs2, %vs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vnclip";
+ vnclip_vx{: vs2, rs1, vmask : vd},
+ disasm: "vnclip_vx", "%vd, %vs2, %rs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vnclip";
+ vnclip_vi{: vs2, uimm5, vmask : vd},
+ disasm: "vnclip_vi", "%vd, %vs2, %uimm5, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vnclip";
+ vwredsumu_vv{: vs2, vs1, vmask: vd},
+ disasm: "vwredsumu.vv", "%vd, %vs2, %vs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vwredsumu";
+ vwredsum_vv{: vs2, vs1, vmask: vd},
+ disasm: "vwredsum.vv", "%vd, %vs2, %vs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vwredsum";
+
+ // Integer OPMVV, OPMVX.
+ vredsum_vv{: vs2, vs1, vmask: vd},
+ disasm: "vredsum.vv", "%vd, %vs2, %vs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vredsum";
+ vredand_vv{: vs2, vs1, vmask: vd},
+ disasm: "vredand.vv", "%vd, %vs2, %vs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vredand";
+ vredor_vv{: vs2, vs1, vmask: vd},
+ disasm: "vredor.vv", "%vd, %vs2, %vs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vredor";
+ vredxor_vv{: vs2, vs1, vmask: vd},
+ disasm: "vredxor.vv", "%vd, %vs2, %vs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vredxor";
+ vredminu_vv{: vs2, vs1, vmask: vd},
+ disasm: "vredminu.vv", "%vd, %vs2, %vs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vredminu";
+ vredmin_vv{: vs2, vs1, vmask: vd},
+ disasm: "vredmin.vv", "%vd, %vs2, %vs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vredmin";
+ vredmaxu_vv{: vs2, vs1, vmask: vd},
+ disasm: "vredmaxu.vv", "%vd, %vs2, %vs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vredmaxu";
+ vredmax_vv{: vs2, vs1, vmask: vd},
+ disasm: "vredmax.vv", "%vd, %vs2, %vs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vredmax";
+ vaaddu_vv{: vs2, vs1, vmask: vd},
+ disasm: "vaaddu.vv", "%vd, %vs2, %vs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vaaddu";
+ vaaddu_vx{: vs2, rs1, vmask: vd},
+ disasm: "vaaddu.vx", "%vd, %vs2, %rs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vaaddu";
+ vaadd_vv{: vs2, vs1, vmask: vd},
+ disasm: "vaadd.vv", "%vd, %vs2, %vs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vaadd";
+ vaadd_vx{: vs2, rs1, vmask: vd},
+ disasm: "vaadd.vx", "%vd, %vs2, %rs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vaadd";
+ vasubu_vv{: vs2, vs1, vmask: vd},
+ disasm: "vasubu.vv", "%vd, %vs2, %vs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vasubu";
+ vasubu_vx{: vs2, rs1, vmask: vd},
+ disasm: "vasubu.vx", "%vd, %vs2, %rs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vasubu";
+ vasub_vv{: vs2, vs1, vmask: vd},
+ disasm: "vasub.vv", "%vd, %vs2, %vs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vasub";
+ vasub_vx{: vs2, rs1, vmask: vd},
+ disasm: "vasub.vx", "%vd, %vs2, %rs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vasub";
+ vslide1up_vx{: vs2, rs1, vmask: vd},
+ disasm: "vslide1up.vx", "%vd, %vs2, %rs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vslide1up";
+ vslide1down_vx{: vs2, rs1, vmask: vd},
+ disasm: "vslide1down.vx", "%vd, %vs2, %rs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vslide1down";
+ vcompress_vv{: vs2, vs1: vd},
+ disasm: "vcompress.vv", "%vd, %vs2, %vs1",
+ semfunc: "&::mpact::sim::riscv::Vcompress";
+ vmandnot_vv{: vs2, vs1: vd},
+ disasm: "vwmandnot.vv", "%vd, %vs2, %vs1",
+ semfunc: "&::mpact::sim::riscv::Vmandnot";
+ vmand_vv{: vs2, vs1: vd},
+ disasm: "vmand.vv", "%vd, %vs2, %vs1",
+ semfunc: "&::mpact::sim::riscv::Vmand";
+ vmor_vv{: vs2, vs1: vd},
+ disasm: "vmor.vv", "%vd, %vs2, %vs1",
+ semfunc: "&::mpact::sim::riscv::Vmor";
+ vmxor_vv{: vs2, vs1: vd},
+ disasm: "vmxor.vv", "%vd, %vs2, %vs1",
+ semfunc: "&::mpact::sim::riscv::Vmxor";
+ vmornot_vv{: vs2, vs1: vd},
+ disasm: "vmornot.vv", "%vd, %vs2, %vs1",
+ semfunc: "&::mpact::sim::riscv::Vmornot";
+ vmnand_vv{: vs2, vs1: vd},
+ disasm: "vmnand.vv", "%vd, %vs2, %vs1",
+ semfunc: "&::mpact::sim::riscv::Vmnand";
+ vmnor_vv{: vs2, vs1: vd},
+ disasm: "vmnor.vv", "%vd, %vs2, %vs1",
+ semfunc: "&::mpact::sim::riscv::Vmnor";
+ vmxnor_vv{: vs2, vs1: vd},
+ disasm: "vmxnor.vv", "%vd, %vs2, %vs1",
+ semfunc: "&::mpact::sim::riscv::Vmxnor";
+ vdivu_vv{: vs2, vs1, vmask: vd},
+ disasm: "vdivu.vv", "%vd, %vs2, %vs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vdivu";
+ vdivu_vx{: vs2, rs1, vmask: vd},
+ disasm: "vdivu.vx", "%vd, %vs2, %rs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vdivu";
+ vdiv_vv{: vs2, vs1, vmask: vd},
+ disasm: "vdiv.vv", "%vd, %vs2, %vs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vdiv";
+ vdiv_vx{: vs2, rs1, vmask: vd},
+ disasm: "vdiv.vx", "%vd, %vs2, %rs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vdiv";
+ vremu_vv{: vs2, vs1, vmask: vd},
+ disasm: "vremu.vv", "%vd, %vs2, %vs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vremu";
+ vremu_vx{: vs2, rs1, vmask: vd},
+ disasm: "vremu.vx", "%vd, %vs2, %rs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vremu";
+ vrem_vv{: vs2, vs1, vmask: vd},
+ disasm: "vrem.vv", "%vd, %vs2, %vs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vrem";
+ vrem_vx{: vs2, rs1, vmask: vd},
+ disasm: "vrem.vx", "%vd, %vs2, %rs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vrem";
+ vmulhu_vv{: vs2, vs1, vmask: vd},
+ disasm: "vmulhu.vv", "%vd, %vs2, %vs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vmulhu";
+ vmulhu_vx{: vs2, rs1, vmask: vd},
+ disasm: "vmulhu.vx", "%vd, %vs2, %rs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vmulhu";
+ vmul_vv{: vs2, vs1, vmask: vd},
+ disasm: "vmul.vv", "%vd, %vs2, %vs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vmul";
+ vmul_vx{: vs2, rs1, vmask: vd},
+ disasm: "vmul.vx", "%vd, %vs2, %rs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vmul";
+ vmulhsu_vv{: vs2, vs1, vmask: vd},
+ disasm: "vmulhsu.vv", "%vd, %vs2, %vs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vmulhsu";
+ vmulhsu_vx{: vs2, rs1, vmask: vd},
+ disasm: "vmulhsu.vx", "%vd, %vs2, %rs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vmulhsu";
+ vmulh_vv{: vs2, vs1, vmask: vd},
+ disasm: "vmulh.vv", "%vd, %vs2, %vs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vmulh";
+ vmulh_vx{: vs2, rs1, vmask: vd},
+ disasm: "vmulh.vx", "%vd, %vs2, %rs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vmulh";
+ vmadd_vv{: vs2, vs1, vd, vmask: vd},
+ disasm: "vmadd.vv", "%vd, %vs2, %vs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vmadd";
+ vmadd_vx{: vs2, rs1, vd, vmask: vd},
+ disasm: "vmadd.vx", "%vd, %vs2, %rs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vmadd";
+ vnmsub_vv{: vs2, vs1, vd, vmask: vd},
+ disasm: "vnmsub.vv", "%vd, %vs2, %vs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vnmsub";
+ vnmsub_vx{: vs2, rs1, vd, vmask: vd},
+ disasm: "vnmsub.vx", "%vd, %vs2, %rs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vnmsub";
+ vmacc_vv{: vs2, vs1, vd, vmask: vd},
+ disasm: "vmacc.vv", "%vd, %vs2, %vs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vmacc";
+ vmacc_vx{: vs2, rs1, vd, vmask: vd},
+ disasm: "vmacc.vx", "%vd, %vs2, %rs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vmacc";
+ vnmsac_vv{: vs2, vs1, vd, vmask: vd},
+ disasm: "vnmsac.vv", "%vd, %vs2, %vs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vnmsac";
+ vnmsac_vx{: vs2, rs1, vd, vmask: vd},
+ disasm: "vnmsac.vx", "%vd, %vs2, %rs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vnmsac";
+ vwaddu_vv{: vs2, vs1, vmask : vd},
+ disasm: "vwaddu.vv", "%vd, %vs2, %vs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vwaddu";
+ vwaddu_vx{: vs2, rs1, vmask : vd},
+ disasm: "vwaddu.vx", "%vd, %vs2, %rs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vwaddu";
+ vwadd_vv{: vs2, vs1, vmask : vd},
+ disasm: "vwadd_vv", "%vd, %vs2, %vs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vwadd";
+ vwadd_vx{: vs2, rs1, vmask : vd},
+ disasm: "vwadd.vx", "%vd, %vs2, %rs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vwadd";
+ vwsubu_vv{: vs2, vs1, vmask : vd},
+ disasm: "vwsubu.vv", "%vd, %vs2, %vs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vwsubu";
+ vwsubu_vx{: vs2, rs1, vmask : vd},
+ disasm: "vwsubu.vx", "%vd, %vs2, %rs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vwsubu";
+ vwsub_vv{: vs2, vs1, vmask : vd},
+ disasm: "vwsub.vv", "%vd, %vs2, %vs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vwsub";
+ vwsub_vx{: vs2, rs1, vmask : vd},
+ disasm: "vwsub.vx", "%vd, %vs2, %rs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vwsub";
+ vwaddu_w_vv{: vs2, vs1, vmask : vd},
+ disasm: "vwaddu.wv", "%vd, %vs2, %vs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vwadduw";
+ vwaddu_w_vx{: vs2, rs1, vmask : vd},
+ disasm: "vwaddu.wx", "%vd, %vs2, %rs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vwadduw";
+ vwadd_w_vv{: vs2, vs1, vmask : vd},
+ disasm: "vwadd.wv", "%vd, %vs2, %vs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vwaddw";
+ vwadd_w_vx{: vs2, rs1, vmask : vd},
+ disasm: "vwadd.wx", "%vd, %vs2, %rs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vwaddw";
+ vwsubu_w_vv{: vs2, vs1, vmask : vd},
+ disasm: "vwsubu.wv", "%vd, %vs2, %vs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vwsubuw";
+ vwsubu_w_vx{: vs2, rs1, vmask : vd},
+ disasm: "vwsubu.wx", "%vd, %vs2, %rs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vwsubuw";
+ vwsub_w_vv{: vs2, vs1, vmask : vd},
+ disasm: "vwsub.wv", "%vd, %vs2, %vs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vwsubw";
+ vwsub_w_vx{: vs2, rs1, vmask : vd},
+ disasm: "vwsub.wx", "%vd, %vs2, %rs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vwsubw";
+ vwmulu_vv{: vs2, vs1, vmask: vd},
+ disasm: "vwmulu.vv", "%vd, %vs2, %vs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vwmulu";
+ vwmulu_vx{: vs2, rs1, vmask: vd},
+ disasm: "vwmulu.vx", "%vd, %vs2, %rs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vwmulu";
+ vwmulsu_vv{: vs2, vs1, vmask: vd},
+ disasm: "vwmulsu.vv", "%vd, %vs2, %vs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vwmulsu";
+ vwmulsu_vx{: vs2, rs1, vmask: vd},
+ disasm: "vwmulsu.vx", "%vd, %vs2, %rs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vwmulsu";
+ vwmul_vv{: vs2, vs1, vmask: vd},
+ disasm: "vwmul.vv", "%vd, %vs2, %vs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vwmul";
+ vwmul_vx{: vs2, rs1, vmask: vd},
+ disasm: "vwmul.vx", "%vd, %vs2, %rs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vwmul";
+ vwmaccu_vv{: vs2, vs1, vd, vmask: vd},
+ disasm: "vwmaccu.vv", "%vd, %vs2, %vs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vwmaccu";
+ vwmaccu_vx{: vs2, rs1, vd, vmask: vd},
+ disasm: "vwmaccu.vx", "%vd, %vs2, %rs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vwmaccu";
+ vwmacc_vv{: vs2, vs1, vd, vmask: vd},
+ disasm: "vwmacc.vv", "%vd, %vs2, %vs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vwmacc";
+ vwmacc_vx{: vs2, rs1, vd, vmask: vd},
+ disasm: "vwmacc.vx", "%vd, %vs2, %rs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vwmacc";
+ vwmaccus_vv{: vs2, vs1, vd, vmask: vd},
+ disasm: "vwmaccus.vv", "%vd, %vs2, %vs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vwmaccus";
+ vwmaccus_vx{: vs2, rs1, vd, vmask: vd},
+ disasm: "vwmaccus.vx", "%vd, %vs2, %rs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vwmaccus";
+ vwmaccsu_vv{: vs2, vs1, vd, vmask: vd},
+ disasm: "vwmaccsu.vv", "%vd, %vs2, %vs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vwmaccsu";
+ vwmaccsu_vx{: vs2, rs1, vd, vmask: vd},
+ disasm: "vwmaccsu.vx", "%vd, %vs2, %rs1, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vwmaccsu";
+
+ // VWXUNARY0
+ vmv_x_s{: vs2 : rd},
+ disasm: "vmv.x.s", "%rd, %vs2",
+ semfunc: "&::mpact::sim::riscv::VmvToScalar";
+ vcpop{: vs2, vmask: rd},
+ disasm: "vcpop", "%rd, %vs2, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vcpop";
+ vfirst{: vs2, vmask: rd},
+ disasm: "vfirst", "%rd, %vs2, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vfirst";
+ // VRXUNARY0
+ vmv_s_x{: rs1 : vd},
+ disasm: "vmv.s.x", "%vd, %rs1",
+ semfunc: "&::mpact::sim::riscv::VmvFromScalar";
+ // VXUNARY0
+ vzext_vf8{: vs2, vmask: vd},
+ disasm: "vzext.vf8", "%vd, %vs2, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vzext8";
+ vsext_vf8{: vs2, vmask: vd},
+ disasm: "vsext.vf8", "%vd, %vs2, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vsext8";
+ vzext_vf4{: vs2, vmask: vd},
+ disasm: "vzext.vf4", "%vd, %vs2, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vzext4";
+ vsext_vf4{: vs2, vmask: vd},
+ disasm: "vsext.vf4", "%vd, %vs2, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vsext4";
+ vzext_vf2{: vs2, vmask: vd},
+ disasm: "vzext.vf2", "%vd, %vs2, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vzext2";
+ vsext_vf2{: vs2, vmask: vd},
+ disasm: "vsext.vf2", "%vd, %vs2, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vsext2";
+ // VMUNARY0
+ vmsbf{:vs2, vmask: vd},
+ disasm: "vmsbf.m", "%vd, %vs2, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vmsbf";
+ vmsof{:vs2, vmask: vd},
+ disasm: "vmsof.m", "%vd, %vs2, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vmsof";
+ vmsif{:vs2, vmask: vd},
+ disasm: "vmsif.m", "%vd, %vs2, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vmsif";
+ viota{:vs2, vmask: vd},
+ disasm: "viota.m", "%vd, %vs2, %vmask",
+ semfunc: "&::mpact::sim::riscv::Viota";
+ vid{: vmask: vd},
+ disasm: "vid.v", "%vd, %vmask",
+ semfunc: "&::mpact::sim::riscv::Vid";
+ }
+}
+
+
+// Slot combining all instruction sets used by Kelvin V2.
+slot kelvin_v2 : riscv32i, riscv32_hints, riscv32m, riscv32f, zicsr, zfencei,
+ privileged, riscv32_zbb, riscv32_zbb_imm, riscv_zve32x {
+ default opcode =
+ disasm: "Illegal instruction at 0x%(@:08x)",
+ semfunc: "&::mpact::sim::riscv::RiscVIUnimplemented";
+ opcodes {
+ mpause{},
+ disasm: "mpause",
+ // mpause is the software breakpoint to terminate the program.
+ // TODO: b/441594698 - Add a real implementation for mpause.
+ semfunc: "&::mpact::sim::riscv::RiscVIUnimplemented";
+ }
+}
diff --git a/sim/kelvin_v2_encoding.cc b/sim/kelvin_v2_encoding.cc
new file mode 100644
index 0000000..a651b7d
--- /dev/null
+++ b/sim/kelvin_v2_encoding.cc
@@ -0,0 +1,68 @@
+// Copyright 2025 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "sim/kelvin_v2_encoding.h"
+
+#include <cstdint>
+
+#include "sim/kelvin_v2_bin_decoder.h"
+#include "sim/kelvin_v2_getters.h"
+#include "sim/kelvin_v2_state.h"
+#include "absl/base/nullability.h"
+#include "riscv/riscv_encoding_common.h"
+#include "mpact/sim/generic/operand_interface.h"
+#include "mpact/sim/generic/type_helpers.h"
+
+namespace kelvin::sim {
+
+using ::kelvin::sim::KelvinV2State;
+using ::mpact::sim::generic::DestinationOperandInterface;
+using ::mpact::sim::generic::operator*; // NOLINT
+using ::mpact::sim::generic::SourceOperandInterface;
+using Extractors = ::kelvin::sim::encoding::Extractors;
+
+KelvinV2Encoding::KelvinV2Encoding(KelvinV2State* /*absl_nonnull*/ state)
+ : RiscVEncodingCommon(), state_(state) {
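+ // The kNone operand enums map to getters that return no operand.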
+ source_op_getters_.emplace(
+ *SourceOpEnum::kNone,
+ []() -> SourceOperandInterface* { return nullptr; });
+ dest_op_getters_.emplace(
+ *DestOpEnum::kNone,
+ [](int latency) -> DestinationOperandInterface* { return nullptr; });
+ // Add Kelvin V2 ISA source operand getters.
+ AddKelvinV2SourceGetters<SourceOpEnum, Extractors>(source_op_getters_, this);
+ // Add Kelvin V2 ISA destination operand getters.
+ AddKelvinV2DestGetters<DestOpEnum, Extractors>(dest_op_getters_, this);
+}
+
+SourceOperandInterface* KelvinV2Encoding::GetSource(SlotEnum, int, OpcodeEnum,
+ SourceOpEnum op, int) {
+ auto const& iter = source_op_getters_.find(*op);
+ if (iter == source_op_getters_.end()) return nullptr;
+ return iter->second();
+}
+
+DestinationOperandInterface* KelvinV2Encoding::GetDestination(
+ SlotEnum, int, OpcodeEnum, DestOpEnum op, int, int latency) {
+ auto const& iter = dest_op_getters_.find(*op);
+ if (iter == dest_op_getters_.end()) return nullptr;
+ return iter->second(latency);
+}
+
+void KelvinV2Encoding::ParseInstruction(uint32_t inst_word) {
+ inst_word_ = inst_word;
+ opcode_ = ::kelvin::sim::encoding::DecodeKelvinV2Inst32(inst_word_);
+}
+
+} // namespace kelvin::sim
diff --git a/sim/kelvin_v2_encoding.h b/sim/kelvin_v2_encoding.h
new file mode 100644
index 0000000..a50e7e2
--- /dev/null
+++ b/sim/kelvin_v2_encoding.h
@@ -0,0 +1,97 @@
+// Copyright 2025 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef SIM_KELVIN_V2_ENCODING_H_
+#define SIM_KELVIN_V2_ENCODING_H_
+
+#include <cstdint>
+
+#include "sim/kelvin_v2_decoder.h"
+#include "sim/kelvin_v2_enums.h"
+#include "sim/kelvin_v2_state.h"
+#include "absl/base/nullability.h"
+#include "absl/container/flat_hash_map.h"
+#include "absl/functional/any_invocable.h"
+#include "riscv/riscv_encoding_common.h"
+#include "mpact/sim/generic/operand_interface.h"
+#include "mpact/sim/generic/simple_resource.h"
+
+namespace kelvin::sim {
+
+class KelvinV2Encoding : public ::kelvin::sim::isa32_v2::KelvinV2EncodingBase,
+ public ::mpact::sim::riscv::RiscVEncodingCommon {
+ public:
+ using DestinationOperandInterface =
+ ::mpact::sim::generic::DestinationOperandInterface;
+ using SimpleResourcePool = ::mpact::sim::generic::SimpleResourcePool;
+ using SourceOperandInterface = ::mpact::sim::generic::SourceOperandInterface;
+ using KelvinV2State = ::kelvin::sim::KelvinV2State;
+
+ using SourceOpGetterMap =
+ absl::flat_hash_map<int, absl::AnyInvocable<SourceOperandInterface*()>>;
+ using DestOpGetterMap = absl::flat_hash_map<
+ int, absl::AnyInvocable<DestinationOperandInterface*(int)>>;
+
+ using OpcodeEnum = ::kelvin::sim::isa32_v2::OpcodeEnum;
+ using SlotEnum = ::kelvin::sim::isa32_v2::SlotEnum;
+ using SourceOpEnum = ::kelvin::sim::isa32_v2::SourceOpEnum;
+ using DestOpEnum = ::kelvin::sim::isa32_v2::DestOpEnum;
+
+ explicit KelvinV2Encoding(KelvinV2State* /*absl_nonnull*/ state);
+
+ // Based on KelvinV2EncodingBase
+ OpcodeEnum GetOpcode(SlotEnum, int) override { return opcode_; }
+
+ // The following method returns a source operand that corresponds to the
+ // particular operand field.
+ SourceOperandInterface* GetSource(SlotEnum, int, OpcodeEnum, SourceOpEnum op,
+ int source_no) override;
+
+ // The following method returns a destination operand that corresponds to the
+ // particular operand field.
+ DestinationOperandInterface* GetDestination(SlotEnum, int, OpcodeEnum,
+ DestOpEnum op, int dest_no,
+ int latency) override;
+
+ // This method returns latency for any destination operand for which the
+ // latency specifier in the .isa file is '*'. Since there are none, just
+ // return 0.
+ int GetLatency(SlotEnum, int, OpcodeEnum, DestOpEnum, int) override {
+ return 0;
+ }
+
+ // Based on RiscVEncodingCommon
+ KelvinV2State* state() const override { return state_; }
+
+ SimpleResourcePool* resource_pool() override { return nullptr; }
+
+ uint32_t inst_word() const override { return inst_word_; }
+
+ // Parses an instruction and determines the opcode.
+ void ParseInstruction(uint32_t inst_word);
+
+ const SourceOpGetterMap& source_op_getters() { return source_op_getters_; }
+ const DestOpGetterMap& dest_op_getters() { return dest_op_getters_; }
+
+ private:
+ uint32_t inst_word_;
+ OpcodeEnum opcode_;
+ KelvinV2State* state_;
+ SourceOpGetterMap source_op_getters_;
+ DestOpGetterMap dest_op_getters_;
+};
+
+} // namespace kelvin::sim
+
+#endif // SIM_KELVIN_V2_ENCODING_H_
diff --git a/sim/kelvin_v2_getters.h b/sim/kelvin_v2_getters.h
new file mode 100644
index 0000000..e970f7a
--- /dev/null
+++ b/sim/kelvin_v2_getters.h
@@ -0,0 +1,318 @@
+// Copyright 2025 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef SIM_KELVIN_V2_GETTERS_H_
+#define SIM_KELVIN_V2_GETTERS_H_
+
+#include <cstdint>
+
+#include "absl/base/nullability.h"
+#include "absl/container/flat_hash_map.h"
+#include "absl/functional/any_invocable.h"
+#include "absl/status/statusor.h"
+#include "absl/strings/str_cat.h"
+#include "riscv/riscv_csr.h"
+#include "riscv/riscv_encoding_common.h"
+#include "riscv/riscv_getter_helpers.h"
+#include "riscv/riscv_getters_vector.h"
+#include "riscv/riscv_register.h"
+#include "riscv/riscv_register_aliases.h"
+#include "riscv/riscv_state.h"
+#include "mpact/sim/generic/immediate_operand.h"
+#include "mpact/sim/generic/literal_operand.h"
+#include "mpact/sim/generic/operand_interface.h"
+#include "mpact/sim/generic/type_helpers.h"
+
+namespace kelvin::sim {
+using ::mpact::sim::generic::DestinationOperandInterface;
+using ::mpact::sim::generic::ImmediateOperand;
+using ::mpact::sim::generic::IntLiteralOperand;
+using ::mpact::sim::generic::operator*; // NOLINT
+using ::mpact::sim::generic::SourceOperandInterface;
+using ::mpact::sim::riscv::GetVectorRegisterSourceOp;
+using ::mpact::sim::riscv::Insert;
+using ::mpact::sim::riscv::kFRegisterAliases;
+using ::mpact::sim::riscv::kXRegisterAliases;
+using ::mpact::sim::riscv::RiscVCsrInterface;
+using ::mpact::sim::riscv::RiscVEncodingCommon;
+using ::mpact::sim::riscv::RiscVState;
+using ::mpact::sim::riscv::RV32Register;
+using ::mpact::sim::riscv::RV32VectorTrueOperand;
+using ::mpact::sim::riscv::RVFpRegister;
+using ::mpact::sim::riscv::RVVectorRegister;
+
+using SourceOpGetterMap =
+ absl::flat_hash_map<int, absl::AnyInvocable<SourceOperandInterface*()>>;
+using DestOpGetterMap =
+ absl::flat_hash_map<int,
+ absl::AnyInvocable<DestinationOperandInterface*(int)>>;
+
+template <typename SourceOpEnum, typename Extractors>
+void AddKelvinV2SourceGetters(SourceOpGetterMap& getter_map,
+ RiscVEncodingCommon* /*absl_nonnull*/ common) {
+ // Source operand getters.
+ Insert(getter_map, *SourceOpEnum::kBImm12,
+ [common]() -> SourceOperandInterface* {
+ return new ImmediateOperand<int32_t>(
+ Extractors::Inst32Format::ExtractBImm(common->inst_word()));
+ });
+ Insert(getter_map, *SourceOpEnum::kCSRUimm5,
+ [common]() -> SourceOperandInterface* {
+ return new ImmediateOperand<uint32_t>(
+ Extractors::Inst32Format::ExtractIUimm5(common->inst_word()));
+ });
+ Insert(getter_map, *SourceOpEnum::kConst1, []() -> SourceOperandInterface* {
+ return new ImmediateOperand<uint32_t>(1);
+ });
+ Insert(getter_map, *SourceOpEnum::kConst2, []() -> SourceOperandInterface* {
+ return new ImmediateOperand<uint32_t>(2);
+ });
+ Insert(getter_map, *SourceOpEnum::kConst4, []() -> SourceOperandInterface* {
+ return new ImmediateOperand<uint32_t>(4);
+ });
+ Insert(getter_map, *SourceOpEnum::kCsr,
+ [common]() -> SourceOperandInterface* {
+ uint16_t csr_index =
+ Extractors::Inst32Format::ExtractUImm12(common->inst_word());
+ absl::StatusOr<RiscVCsrInterface*> csr_status =
+ common->state()->csr_set()->GetCsr(csr_index);
+ if (!csr_status.ok()) {
+ return new ImmediateOperand<uint32_t>(csr_index);
+ } else {
+ return new ImmediateOperand<uint32_t>(csr_index,
+ (*csr_status)->name());
+ }
+ });
+ Insert(getter_map, *SourceOpEnum::kFrs1,
+ [common]() -> SourceOperandInterface* {
+ int num = Extractors::RType::ExtractRs1(common->inst_word());
+ return GetRegisterSourceOp<RVFpRegister>(
+ common->state(), absl::StrCat(RiscVState::kFregPrefix, num),
+ kFRegisterAliases[num]);
+ });
+ Insert(getter_map, *SourceOpEnum::kFrs2,
+ [common]() -> SourceOperandInterface* {
+ int num = Extractors::RType::ExtractRs2(common->inst_word());
+ return GetRegisterSourceOp<RVFpRegister>(
+ common->state(), absl::StrCat(RiscVState::kFregPrefix, num),
+ kFRegisterAliases[num]);
+ });
+ Insert(getter_map, *SourceOpEnum::kFrs3,
+ [common]() -> SourceOperandInterface* {
+ int num = Extractors::R4Type::ExtractRs3(common->inst_word());
+ return GetRegisterSourceOp<RVFpRegister>(
+ common->state(), absl::StrCat(RiscVState::kFregPrefix, num),
+ kFRegisterAliases[num]);
+ });
+ Insert(getter_map, *SourceOpEnum::kIImm12,
+ [common]() -> SourceOperandInterface* {
+ return new ImmediateOperand<int32_t>(
+ Extractors::IType::ExtractImm12(common->inst_word()));
+ });
+ Insert(getter_map, *SourceOpEnum::kIUimm5,
+ [common]() -> SourceOperandInterface* {
+ return new ImmediateOperand<uint32_t>(
+ Extractors::IType::ExtractIUimm5(common->inst_word()));
+ });
+ Insert(getter_map, *SourceOpEnum::kJImm12,
+ [common]() -> SourceOperandInterface* {
+ return new ImmediateOperand<int32_t>(
+ Extractors::Inst32Format::ExtractImm12(common->inst_word()));
+ });
+ Insert(getter_map, *SourceOpEnum::kJImm20,
+ [common]() -> SourceOperandInterface* {
+ return new ImmediateOperand<int32_t>(
+ Extractors::JType::ExtractJImm(common->inst_word()));
+ });
+ Insert(getter_map, *SourceOpEnum::kNf, [common]() -> SourceOperandInterface* {
+ int num_fields = Extractors::VMem::ExtractNf(common->inst_word());
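+ // The encoded nf field is the number of segment fields minus one, so add
+ // one back when forming the printed value.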
+ return new ImmediateOperand<uint8_t>(num_fields,
+ absl::StrCat(num_fields + 1));
+ });
+ Insert(getter_map, *SourceOpEnum::kPred,
+ [common]() -> SourceOperandInterface* {
+ return new ImmediateOperand<uint32_t>(
+ Extractors::Fence::ExtractPred(common->inst_word()));
+ });
+ Insert(getter_map, *SourceOpEnum::kRUimm5,
+ [common]() -> SourceOperandInterface* {
+ return new ImmediateOperand<uint32_t>(
+ Extractors::RType::ExtractRUimm5(common->inst_word()));
+ });
+ Insert(getter_map, *SourceOpEnum::kRm, [common]() -> SourceOperandInterface* {
+ int rm = Extractors::RType::ExtractFunc3(common->inst_word());
+ switch (rm) {
+ case 0:
+ return new IntLiteralOperand<0>();
+ case 1:
+ return new IntLiteralOperand<1>();
+ case 2:
+ return new IntLiteralOperand<2>();
+ case 3:
+ return new IntLiteralOperand<3>();
+ case 4:
+ return new IntLiteralOperand<4>();
+ case 5:
+ return new IntLiteralOperand<5>();
+ case 6:
+ return new IntLiteralOperand<6>();
+ case 7:
+ return new IntLiteralOperand<7>();
+ default:
+ return nullptr;
+ }
+ });
+ Insert(getter_map, *SourceOpEnum::kRs1,
+ [common]() -> SourceOperandInterface* {
+ int num = Extractors::RType::ExtractRs1(common->inst_word());
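+ // x0 always reads as zero, so return a literal 0 operand instead of a
+ // register read.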
+ if (num == 0) return new IntLiteralOperand<0>({1});
+ return GetRegisterSourceOp<RV32Register>(
+ common->state(), absl::StrCat(RiscVState::kXregPrefix, num),
+ kXRegisterAliases[num]);
+ });
+ Insert(getter_map, *SourceOpEnum::kRs2,
+ [common]() -> SourceOperandInterface* {
+ int num = Extractors::RType::ExtractRs2(common->inst_word());
+ if (num == 0) return new IntLiteralOperand<0>({1});
+ return GetRegisterSourceOp<RV32Register>(
+ common->state(), absl::StrCat(RiscVState::kXregPrefix, num),
+ kXRegisterAliases[num]);
+ });
+ Insert(getter_map, *SourceOpEnum::kSImm12,
+ [common]() -> SourceOperandInterface* {
+ return new ImmediateOperand<int32_t>(
+ Extractors::SType::ExtractSImm(common->inst_word()));
+ });
+ Insert(getter_map, *SourceOpEnum::kSimm5,
+ [common]() -> SourceOperandInterface* {
+ return new ImmediateOperand<int32_t>(
+ Extractors::VArith::ExtractSimm5(common->inst_word()));
+ });
+ Insert(getter_map, *SourceOpEnum::kSucc,
+ [common]() -> SourceOperandInterface* {
+ return new ImmediateOperand<uint32_t>(
+ Extractors::Fence::ExtractSucc(common->inst_word()));
+ });
+ Insert(getter_map, *SourceOpEnum::kUImm20,
+ [common]() -> SourceOperandInterface* {
+ return new ImmediateOperand<int32_t>(
+ Extractors::UType::ExtractUImm(common->inst_word()));
+ });
+ Insert(getter_map, *SourceOpEnum::kUimm5,
+ [common]() -> SourceOperandInterface* {
+ return new ImmediateOperand<int32_t>(
+ Extractors::VArith::ExtractUimm5(common->inst_word()));
+ });
+ Insert(getter_map, *SourceOpEnum::kVd, [common]() -> SourceOperandInterface* {
+ return GetVectorRegisterSourceOp<RVVectorRegister>(
+ common->state(), Extractors::VArith::ExtractVd(common->inst_word()));
+ });
+ Insert(getter_map, *SourceOpEnum::kVm, [common]() -> SourceOperandInterface* {
+ int vm = Extractors::VArith::ExtractVm(common->inst_word());
+ return new ImmediateOperand<bool>(vm, absl::StrCat("vm.", vm ? "t" : "f"));
+ });
+ Insert(getter_map, *SourceOpEnum::kVmask,
+ [common]() -> SourceOperandInterface* {
+ int vm = Extractors::VArith::ExtractVm(common->inst_word());
+ if (vm) {
+ // Unmasked, return the True mask.
+ return new RV32VectorTrueOperand(common->state());
+ }
+ // Masked. Return the mask register.
+ return GetVectorMaskRegisterSourceOp<RVVectorRegister>(
+ common->state(), 0);
+ });
+ Insert(getter_map, *SourceOpEnum::kVmaskTrue,
+ [common]() -> SourceOperandInterface* {
+ return new RV32VectorTrueOperand(common->state());
+ });
+ Insert(getter_map, *SourceOpEnum::kVs1,
+ [common]() -> SourceOperandInterface* {
+ int num = Extractors::VArith::ExtractVs1(common->inst_word());
+ return GetVectorRegisterSourceOp<RVVectorRegister>(common->state(),
+ num);
+ });
+ Insert(getter_map, *SourceOpEnum::kVs2,
+ [common]() -> SourceOperandInterface* {
+ int num = Extractors::VArith::ExtractVs2(common->inst_word());
+ return GetVectorRegisterSourceOp<RVVectorRegister>(common->state(),
+ num);
+ });
+ Insert(getter_map, *SourceOpEnum::kVs3,
+ [common]() -> SourceOperandInterface* {
+ int num = Extractors::VMem::ExtractVs3(common->inst_word());
+ return GetVectorRegisterSourceOp<RVVectorRegister>(common->state(),
+ num);
+ });
+ Insert(getter_map, *SourceOpEnum::kZimm10,
+ [common]() -> SourceOperandInterface* {
+ int num = Extractors::VConfig::ExtractZimm10(common->inst_word());
+ return new ImmediateOperand<int32_t>(num);
+ });
+ Insert(getter_map, *SourceOpEnum::kZimm11,
+ [common]() -> SourceOperandInterface* {
+ int num = Extractors::VConfig::ExtractZimm11(common->inst_word());
+ return new ImmediateOperand<int32_t>(num);
+ });
+}
+
+template <typename DestOpEnum, typename Extractors>
+void AddKelvinV2DestGetters(DestOpGetterMap& getter_map,
+ RiscVEncodingCommon* /*absl_nonnull*/ common) {
+ // Destination operand getters.
+ Insert(getter_map, *DestOpEnum::kCsr,
+ [common](int latency) -> DestinationOperandInterface* {
+ return GetRegisterDestinationOp<RV32Register>(
+ common->state(), RiscVState::kCsrName, latency);
+ });
+ Insert(getter_map, *DestOpEnum::kFflags,
+ [common](int latency) -> DestinationOperandInterface* {
+ return GetCSRSetBitsDestinationOp<uint32_t>(common->state(),
+ "fflags", latency, "");
+ });
+ Insert(getter_map, *DestOpEnum::kFrd,
+ [common](int latency) -> DestinationOperandInterface* {
+ int num = Extractors::RType::ExtractRd(common->inst_word());
+ return GetRegisterDestinationOp<RVFpRegister>(
+ common->state(), absl::StrCat(RiscVState::kFregPrefix, num),
+ latency, kFRegisterAliases[num]);
+ });
+ Insert(getter_map, *DestOpEnum::kNextPc,
+ [common](int latency) -> DestinationOperandInterface* {
+ return GetRegisterDestinationOp<RV32Register>(
+ common->state(), RiscVState::kPcName, latency);
+ });
+ Insert(getter_map, *DestOpEnum::kRd,
+ [common](int latency) -> DestinationOperandInterface* {
+ int num = Extractors::RType::ExtractRd(common->inst_word());
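+ // Writes to x0 are routed to a separate "X0Dest" register so that x0
+ // itself stays zero.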
+ if (num == 0) {
+ return GetRegisterDestinationOp<RV32Register>(common->state(),
+ "X0Dest", 0);
+ } else {
+ return GetRegisterDestinationOp<RV32Register>(
+ common->state(), absl::StrCat(RiscVState::kXregPrefix, num),
+ latency, kXRegisterAliases[num]);
+ }
+ });
+ Insert(getter_map, *DestOpEnum::kVd,
+ [common](int latency) -> DestinationOperandInterface* {
+ int num = Extractors::VArith::ExtractVd(common->inst_word());
+ return GetVectorRegisterDestinationOp<RVVectorRegister>(
+ common->state(), latency, num);
+ });
+}
+
+} // namespace kelvin::sim
+
+#endif // SIM_KELVIN_V2_GETTERS_H_
diff --git a/sim/kelvin_v2_state.cc b/sim/kelvin_v2_state.cc
new file mode 100644
index 0000000..b930463
--- /dev/null
+++ b/sim/kelvin_v2_state.cc
@@ -0,0 +1,48 @@
+// Copyright 2025 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "sim/kelvin_v2_state.h"
+
+#include <cstdint>
+
+#include "absl/base/nullability.h"
+#include "absl/strings/string_view.h"
+#include "riscv/riscv_state.h"
+#include "mpact/sim/util/memory/memory_interface.h"
+
+namespace kelvin::sim {
+using ::mpact::sim::riscv::RiscVXlen;
+using ::mpact::sim::util::AtomicMemoryOpInterface;
+using ::mpact::sim::util::MemoryInterface;
+
+// StretchMisa32 stretches the 32-bit misa value into a 64-bit value by moving
+// the upper 2 bits (the MXL field) into bits 63:62 and keeping the extension
+// bits in the low 26 bits.
+static inline uint64_t StretchMisa32(uint32_t value) {
+ uint64_t value64 = static_cast<uint64_t>(value);
+ value64 = ((value64 & 0xc000'0000) << 32) | (value64 & 0x03ff'ffff);
+ return value64;
+}
+
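+// The initial misa value encodes MXL = 1 (RV32) plus the I, F, M, and V
+// extension bits; StretchMisa32 expands it to 0x4000'0000'0020'1120.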
+constexpr uint32_t kKelvinV2MisaInitialValue = 0x40201120;
+
+KelvinV2State::KelvinV2State(
+ absl::string_view id, RiscVXlen xlen, MemoryInterface* /*absl_nonnull*/ memory,
+ AtomicMemoryOpInterface* /*absl_nullable*/ atomic_memory)
+ : RiscVState(id, xlen, memory, atomic_memory) {
+ // Set the initial value of the misa CSR to the Kelvin V2 ISA value.
+ misa()->Set(StretchMisa32(kKelvinV2MisaInitialValue));
+}
+KelvinV2State::~KelvinV2State() = default;
+
+} // namespace kelvin::sim
diff --git a/sim/kelvin_v2_state.h b/sim/kelvin_v2_state.h
new file mode 100644
index 0000000..33d7dd7
--- /dev/null
+++ b/sim/kelvin_v2_state.h
@@ -0,0 +1,47 @@
+// Copyright 2025 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef SIM_KELVIN_V2_STATE_H_
+#define SIM_KELVIN_V2_STATE_H_
+
+#include "absl/base/nullability.h"
+#include "absl/strings/string_view.h"
+#include "riscv/riscv_state.h"
+#include "mpact/sim/util/memory/memory_interface.h"
+
+namespace kelvin::sim {
+class KelvinV2State : public ::mpact::sim::riscv::RiscVState {
+ public:
+ using AtomicMemoryOpInterface = ::mpact::sim::util::AtomicMemoryOpInterface;
+ using MemoryInterface = ::mpact::sim::util::MemoryInterface;
+ using RiscVState = ::mpact::sim::riscv::RiscVState;
+ using RiscVXlen = ::mpact::sim::riscv::RiscVXlen;
+
+ KelvinV2State(absl::string_view id, RiscVXlen xlen,
+ MemoryInterface* /*absl_nonnull*/ memory,
+ AtomicMemoryOpInterface* /*absl_nullable*/ atomic_memory);
+ KelvinV2State(absl::string_view id, RiscVXlen xlen,
+ MemoryInterface* /*absl_nonnull*/ memory)
+ : KelvinV2State(id, xlen, memory, nullptr) {}
+ ~KelvinV2State() override;
+
+ // Deleted Constructors and operators.
+ KelvinV2State(const KelvinV2State&) = delete;
+ KelvinV2State(KelvinV2State&&) = delete;
+ KelvinV2State& operator=(const KelvinV2State&) = delete;
+ KelvinV2State& operator=(KelvinV2State&&) = delete;
+};
+} // namespace kelvin::sim
+
+#endif // SIM_KELVIN_V2_STATE_H_
diff --git a/sim/kelvin_v2_user_decoder.cc b/sim/kelvin_v2_user_decoder.cc
new file mode 100644
index 0000000..bd1428d
--- /dev/null
+++ b/sim/kelvin_v2_user_decoder.cc
@@ -0,0 +1,97 @@
+// Copyright 2025 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "sim/kelvin_v2_user_decoder.h"
+
+#include <cstdint>
+#include <memory>
+
+#include "sim/kelvin_v2_decoder.h"
+#include "sim/kelvin_v2_encoding.h"
+#include "sim/kelvin_v2_enums.h"
+#include "sim/kelvin_v2_state.h"
+#include "absl/base/nullability.h"
+#include "riscv/riscv_state.h"
+#include "mpact/sim/generic/instruction.h"
+#include "mpact/sim/generic/program_error.h"
+#include "mpact/sim/util/memory/memory_interface.h"
+
+namespace kelvin::sim {
+using ::kelvin::sim::KelvinV2Encoding;
+using ::kelvin::sim::KelvinV2State;
+using ::kelvin::sim::isa32_v2::KelvinV2InstructionSet;
+using ::mpact::sim::generic::Instruction;
+using ::mpact::sim::generic::ProgramErrorController;
+using ::mpact::sim::generic::operator*; // NOLINT
+using ::kelvin::sim::isa32_v2::kOpcodeNames;
+using ::kelvin::sim::isa32_v2::OpcodeEnum;
+using ::mpact::sim::riscv::ExceptionCode;
+using ::mpact::sim::util::MemoryInterface;
+
+KelvinV2UserDecoder::KelvinV2UserDecoder(KelvinV2State* /*absl_nonnull*/ state,
+ MemoryInterface* /*absl_nonnull*/ memory)
+ : state_(state), memory_(memory) {
+ // Need a data buffer to load instructions from memory. Allocate a single
+ // buffer that can be reused for each instruction word.
+ inst_db_ = state_->db_factory()->Allocate<uint32_t>(1);
+ // Allocate the isa factory class, the top level isa decoder instance, and
+ // the encoding parser.
+ kelvin_v2_isa_factory_ = std::make_unique<KelvinV2IsaFactory>();
+ kelvin_v2_isa_ = std::make_unique<KelvinV2InstructionSet>(
+ state, kelvin_v2_isa_factory_.get());
+ kelvin_v2_encoding_ = std::make_unique<KelvinV2Encoding>(state);
+ decode_error_ = state->program_error_controller()->GetProgramError(
+ ProgramErrorController::kInternalErrorName);
+}
+
+KelvinV2UserDecoder::~KelvinV2UserDecoder() { inst_db_->DecRef(); }
+
+Instruction* KelvinV2UserDecoder::DecodeInstruction(uint64_t address) {
+ // Address alignment check.
+ if (address & 0x1) {
+ Instruction* inst = new Instruction(0, state_);
+ inst->set_size(1);
+ inst->SetDisassemblyString("Misaligned instruction address");
+ inst->set_opcode(*::kelvin::sim::isa32_v2::OpcodeEnum::kNone);
+ inst->set_address(address);
+ inst->set_semantic_function([this](Instruction* inst) {
+ state_->Trap(/*is_interrupt*/ false, inst->address(),
+ *ExceptionCode::kInstructionAddressMisaligned,
+ inst->address() ^ 0x1, inst);
+ });
+ return inst;
+ }
+
+ // TODO - b/442008530: Trigger a decoder failure if address is outside the
+ // ITCM range.
+
+ // Read the instruction word from memory and parse it in the encoding parser.
+ memory_->Load(address, inst_db_, nullptr, nullptr);
+ uint32_t iword = inst_db_->Get<uint32_t>(0);
+ kelvin_v2_encoding_->ParseInstruction(iword);
+
+ // Call the isa decoder to obtain a new instruction object for the instruction
+ // word that was parsed above.
+ return kelvin_v2_isa_->Decode(address, kelvin_v2_encoding_.get());
+}
+
+int KelvinV2UserDecoder::GetNumOpcodes() const {
+ return static_cast<int>(OpcodeEnum::kPastMaxValue);
+}
+
+const char* KelvinV2UserDecoder::GetOpcodeName(int index) const {
+ return kOpcodeNames[index];
+}
+
+} // namespace kelvin::sim
diff --git a/sim/kelvin_v2_user_decoder.h b/sim/kelvin_v2_user_decoder.h
new file mode 100644
index 0000000..9cc83e7
--- /dev/null
+++ b/sim/kelvin_v2_user_decoder.h
@@ -0,0 +1,83 @@
+// Copyright 2025 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef SIM_KELVIN_V2_USER_DECODER_H_
+#define SIM_KELVIN_V2_USER_DECODER_H_
+
+#include <cstdint>
+#include <memory>
+
+#include "sim/kelvin_v2_decoder.h"
+#include "sim/kelvin_v2_encoding.h"
+#include "sim/kelvin_v2_state.h"
+#include "absl/base/nullability.h"
+#include "mpact/sim/generic/arch_state.h"
+#include "mpact/sim/generic/data_buffer.h"
+#include "mpact/sim/generic/decoder_interface.h"
+#include "mpact/sim/generic/instruction.h"
+#include "mpact/sim/generic/program_error.h"
+#include "mpact/sim/util/memory/memory_interface.h"
+
+namespace kelvin::sim {
+
+// This is the factory class needed by the generated decoder. It is responsible
+// for creating the decoder for each slot instance. Since the RISC-V architecture
+// only has a single slot, it's a pretty simple class.
+class KelvinV2IsaFactory
+ : public ::kelvin::sim::isa32_v2::KelvinV2InstructionSetFactory {
+ using ArchState = ::mpact::sim::generic::ArchState;
+ using KelvinV2Slot = ::kelvin::sim::isa32_v2::KelvinV2Slot;
+
+ public:
+ std::unique_ptr<KelvinV2Slot> CreateKelvinV2Slot(ArchState* state) override {
+ return std::make_unique<KelvinV2Slot>(state);
+ }
+};
+
+class KelvinV2UserDecoder : public ::mpact::sim::generic::DecoderInterface {
+ public:
+ using DataBuffer = ::mpact::sim::generic::DataBuffer;
+ using Instruction = ::mpact::sim::generic::Instruction;
+ using KelvinV2Encoding = ::kelvin::sim::KelvinV2Encoding;
+ using KelvinV2InstructionSet =
+ ::kelvin::sim::isa32_v2::KelvinV2InstructionSet;
+ using MemoryInterface = ::mpact::sim::util::MemoryInterface;
+ using ProgramError = ::mpact::sim::generic::ProgramError;
+
+ KelvinV2UserDecoder(KelvinV2State* /*absl_nonnull*/ state,
+ MemoryInterface* /*absl_nonnull*/ memory);
+ ~KelvinV2UserDecoder() override;
+
+ // Decodes an instruction at the given address.
+ Instruction* DecodeInstruction(uint64_t address) override;
+
+ // Returns the number of opcodes supported by this decoder.
+ int GetNumOpcodes() const override;
+
+ // Returns the name of the opcode at the given index.
+ const char* GetOpcodeName(int index) const override;
+
+ private:
+ KelvinV2State* state_;
+ MemoryInterface* memory_;
+ DataBuffer* inst_db_;
+ std::unique_ptr<ProgramError> decode_error_;
+ std::unique_ptr<KelvinV2Encoding> kelvin_v2_encoding_;
+ std::unique_ptr<KelvinV2IsaFactory> kelvin_v2_isa_factory_;
+ std::unique_ptr<KelvinV2InstructionSet> kelvin_v2_isa_;
+};
+
+} // namespace kelvin::sim
+
+#endif // SIM_KELVIN_V2_USER_DECODER_H_
diff --git a/sim/test/BUILD b/sim/test/BUILD
index f966a87..920d836 100644
--- a/sim/test/BUILD
+++ b/sim/test/BUILD
@@ -206,8 +206,25 @@
)
cc_test(
+ name = "kelvin_v2_user_decoder_test",
+ srcs = ["kelvin_v2_user_decoder_test.cc"],
+ copts = ["-Werror"],
+ deps = [
+ "//sim:kelvin_v2_isa",
+ "//sim:kelvin_v2_state",
+ "//sim:kelvin_v2_user_decoder",
+ "@com_google_absl//absl/memory",
+ "@com_google_googletest//:gtest_main",
+ "@com_google_mpact-riscv//riscv:riscv_state",
+ "@com_google_mpact-sim//mpact/sim/generic:core",
+ "@com_google_mpact-sim//mpact/sim/util/memory",
+ ],
+)
+
+cc_test(
name = "kelvin_cosim_dpi_wrapper_test",
srcs = ["kelvin_cosim_dpi_wrapper_test.cc"],
+ copts = ["-Werror"],
deps = [
"//sim/cosim:kelvin_cosim_lib",
"@com_google_googletest//:gtest_main",
diff --git a/sim/test/kelvin_cosim_dpi_wrapper_test.cc b/sim/test/kelvin_cosim_dpi_wrapper_test.cc
index 0c5fca5..035230f 100644
--- a/sim/test/kelvin_cosim_dpi_wrapper_test.cc
+++ b/sim/test/kelvin_cosim_dpi_wrapper_test.cc
@@ -6,61 +6,103 @@
namespace {
-const uint32_t kLoadImmediateToX5 = 0b11011110101011011011'00101'0110111;
-const uint32_t kAddImmediateToX5_2047 = 0b011111111111'00101'000'00101'0010011;
-const uint32_t kAddImmediateToX5_1776 = 0b011011110000'00101'000'00101'0010011;
-const uint32_t kExpectedX5Value = 0xdeadbeef;
-const uint32_t kNopInstruction = 0x00000013; // x0 = x0 + 0 (nop)
-const uint32_t kMcycleCsrAddress = 0xb00;
+constexpr uint32_t kLoadImmediateToX5 = 0b11011110101011011011'00101'0110111;
+constexpr uint32_t kFmvX5ToF5 = 0b1111000'00000'00101'000'00101'1010011;
+constexpr uint32_t kAddImmediateToX5_2047 =
+ 0b011111111111'00101'000'00101'0010011;
+constexpr uint32_t kAddImmediateToX5_1776 =
+ 0b011011110000'00101'000'00101'0010011;
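+// lui x5, 0xdeadb followed by addi +2047 (0x7ff) and +1776 (0x6f0) produces
+// 0xdeadb000 + 0xeef = 0xdeadbeef.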
+constexpr uint32_t kExpectedX5Value = 0xdeadbeef;
+constexpr uint32_t kNopInstruction = 0x00000013; // x0 = x0 + 0 (nop)
+constexpr uint32_t kExpectedMisaValue = 0x40201120;
class CosimFixture : public ::testing::Test {
public:
CosimFixture() { mpact_init(); }
~CosimFixture() override { mpact_fini(); }
+
+ int add_test_values_to_x5() {
+ int status = 0;
+ status = mpact_step_wrapper(kLoadImmediateToX5);
+ if (status != 0) {
+ return status;
+ }
+ status = mpact_step_wrapper(kAddImmediateToX5_2047);
+ if (status != 0) {
+ return status;
+ }
+ status = mpact_step_wrapper(kAddImmediateToX5_1776);
+ return status;
+ }
+
+ int mpact_step_wrapper(uint32_t instruction) {
+ int status = 0;
+ svLogicVecVal instruction_struct;
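+ // aval carries the instruction bits; bval = 0 marks every bit as a known
+ // 0/1 value (no X/Z) in the 4-state SystemVerilog encoding.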
+ instruction_struct.aval = instruction;
+ instruction_struct.bval = 0;
+ status = mpact_step(&instruction_struct);
+ return status;
+ }
};
-TEST_F(CosimFixture, Step) {
- svLogicVecVal instruction;
- instruction.aval = 0x00000000;
- EXPECT_EQ(mpact_step(&instruction), 0);
+TEST_F(CosimFixture, GetPc) {
+ uint32_t pc_value = 1;
+ EXPECT_EQ(mpact_get_register("pc", &pc_value), 0);
+ EXPECT_EQ(pc_value, 0);
}
-TEST_F(CosimFixture, GetPc) { EXPECT_EQ(mpact_get_pc(), 0); }
-
TEST_F(CosimFixture, GetPcAfterStep) {
- svLogicVecVal instruction;
- instruction.aval = kNopInstruction;
- EXPECT_EQ(mpact_step(&instruction), 0);
- EXPECT_EQ(mpact_get_pc(), 4);
+ uint32_t pc_value = 1;
+ EXPECT_EQ(mpact_step_wrapper(kNopInstruction), 0);
+ EXPECT_EQ(mpact_get_register("pc", &pc_value), 0);
+ EXPECT_EQ(pc_value, 4);
}
TEST_F(CosimFixture, GetPcAfterReset) {
- svLogicVecVal instruction;
- instruction.aval = kNopInstruction; // x0 = x0 + 0 (nop)
- EXPECT_EQ(mpact_step(&instruction), 0);
- EXPECT_NE(mpact_get_pc(), 0);
+ uint32_t pc_value = 1;
+ EXPECT_EQ(mpact_step_wrapper(kNopInstruction), 0);
+ EXPECT_EQ(mpact_get_register("pc", &pc_value), 0);
+ EXPECT_NE(pc_value, 0);
EXPECT_EQ(mpact_reset(), 0);
- EXPECT_EQ(mpact_get_pc(), 0);
+ EXPECT_EQ(mpact_get_register("pc", &pc_value), 0);
+ EXPECT_EQ(pc_value, 0);
}
-TEST_F(CosimFixture, CheckGpr) {
- EXPECT_EQ(mpact_get_gpr(5), 0);
- svLogicVecVal instruction;
- instruction.aval = kLoadImmediateToX5;
- EXPECT_EQ(mpact_step(&instruction), 0);
- instruction.aval = kAddImmediateToX5_2047;
- EXPECT_EQ(mpact_step(&instruction), 0);
- instruction.aval = kAddImmediateToX5_1776;
- EXPECT_EQ(mpact_step(&instruction), 0);
- EXPECT_EQ(mpact_get_gpr(5), kExpectedX5Value);
+TEST_F(CosimFixture, GetGpr) {
+ uint32_t gpr_value = 1;
+ EXPECT_EQ(mpact_get_register("x5", &gpr_value), 0);
+ EXPECT_EQ(gpr_value, 0);
+ EXPECT_EQ(add_test_values_to_x5(), 0);
+ EXPECT_EQ(mpact_get_register("x5", &gpr_value), 0);
+ EXPECT_EQ(gpr_value, kExpectedX5Value);
}
TEST_F(CosimFixture, GetMcycleCsr) {
- EXPECT_EQ(mpact_get_csr(kMcycleCsrAddress), 0);
- svLogicVecVal instruction;
- instruction.aval = kNopInstruction; // x0 = x0 + 0 (nop)
- EXPECT_EQ(mpact_step(&instruction), 0);
- EXPECT_EQ(mpact_get_csr(kMcycleCsrAddress), 1);
+ uint32_t mcycle_value = 12345;
+ EXPECT_EQ(mpact_get_register("mcycle", &mcycle_value), 0);
+ EXPECT_EQ(mcycle_value, 0);
+ EXPECT_EQ(mpact_step_wrapper(kNopInstruction), 0);
+ EXPECT_EQ(mpact_get_register("mcycle", &mcycle_value), 0);
+ EXPECT_EQ(mcycle_value, 1);
+}
+
+TEST_F(CosimFixture, GetFpr) {
+ uint32_t gpr_value = 1;
+ uint32_t fpr_value = 1;
+ EXPECT_EQ(mpact_get_register("x5", &gpr_value), 0);
+ EXPECT_EQ(gpr_value, 0);
+ EXPECT_EQ(mpact_get_register("f5", &fpr_value), 0);
+ EXPECT_EQ(fpr_value, 0);
+ EXPECT_EQ(add_test_values_to_x5(), 0);
+ EXPECT_EQ(mpact_step_wrapper(kFmvX5ToF5), 0);
+ EXPECT_EQ(mpact_get_register("f5", &fpr_value), 0);
+ EXPECT_EQ(fpr_value, kExpectedX5Value);
+}
+
+TEST_F(CosimFixture, GetMisaCsr) {
+ uint32_t misa_value = 0;
+ EXPECT_EQ(mpact_get_register("misa", &misa_value), 0);
+ EXPECT_EQ(misa_value, kExpectedMisaValue);
}
} // namespace
diff --git a/sim/test/kelvin_v2_user_decoder_test.cc b/sim/test/kelvin_v2_user_decoder_test.cc
new file mode 100644
index 0000000..d7111b9
--- /dev/null
+++ b/sim/test/kelvin_v2_user_decoder_test.cc
@@ -0,0 +1,113 @@
+// Copyright 2025 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "sim/kelvin_v2_user_decoder.h"
+
+#include <cstdint>
+#include <memory>
+
+#include "sim/kelvin_v2_encoding.h"
+#include "sim/kelvin_v2_enums.h"
+#include "sim/kelvin_v2_state.h"
+#include "googletest/include/gtest/gtest.h"
+#include "absl/memory/memory.h"
+#include "riscv/riscv_state.h"
+#include "mpact/sim/generic/data_buffer.h"
+#include "mpact/sim/util/memory/flat_demand_memory.h"
+#include "mpact/sim/util/memory/memory_interface.h"
+
+namespace {
+
+using ::kelvin::sim::KelvinV2Encoding;
+using ::kelvin::sim::KelvinV2State;
+using ::kelvin::sim::KelvinV2UserDecoder;
+using ::kelvin::sim::isa32_v2::DestOpEnum;
+using ::kelvin::sim::isa32_v2::kDestOpNames;
+using ::kelvin::sim::isa32_v2::kSourceOpNames;
+using ::kelvin::sim::isa32_v2::OpcodeEnum;
+using ::kelvin::sim::isa32_v2::SourceOpEnum;
+using ::mpact::sim::generic::DataBuffer;
+using ::mpact::sim::riscv::Instruction;
+using ::mpact::sim::riscv::RiscVXlen;
+using ::mpact::sim::generic::operator*; // NOLINT: clang-tidy false positive.
+using ::mpact::sim::util::FlatDemandMemory;
+using ::mpact::sim::util::MemoryInterface;
+
+// addi x1, x1, 0: adds zero to x1, so register values are unchanged and the
+// instruction behaves as a nop.
+constexpr uint32_t kNopAddiInstruction = 0b000000000000'00001'000'00001'0010011;
+
+class KelvinV2UserDecoderFixture : public ::testing::Test {
+ public:
+ void SetUp() override {
+ memory_ = std::make_unique<FlatDemandMemory>();
+ state_ = std::make_unique<KelvinV2State>("KelvinV2", RiscVXlen::RV32,
+ memory_.get());
+ decoder_ =
+ std::make_unique<KelvinV2UserDecoder>(state_.get(), memory_.get());
+ }
+
+ protected:
+ std::unique_ptr<KelvinV2State> state_;
+ std::unique_ptr<MemoryInterface> memory_;
+ std::unique_ptr<KelvinV2UserDecoder> decoder_;
+};
+
+TEST_F(KelvinV2UserDecoderFixture, TestGetNumOpcodes) {
+ EXPECT_NE(decoder_->GetNumOpcodes(), 0);
+}
+
+TEST_F(KelvinV2UserDecoderFixture, DecodeInstruction) {
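+ // Write the encoded addi instruction into simulated memory at address 0,
+ // then ask the decoder to decode from that address.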
+ uint64_t test_address = 0;
+ DataBuffer* inst_db = state_->db_factory()->Allocate<uint32_t>(1);
+ inst_db->Set<uint32_t>(/*index=*/0, kNopAddiInstruction);
+ memory_->Store(test_address, inst_db);
+ std::unique_ptr<Instruction> instruction =
+ absl::WrapUnique(decoder_->DecodeInstruction(test_address));
+ EXPECT_NE(instruction.get(), nullptr);
+ EXPECT_EQ(instruction->opcode(), *OpcodeEnum::kAddi);
+ inst_db->DecRef();
+}
+
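+// Fixture that constructs a KelvinV2Encoding directly so its operand getter
+// tables can be inspected.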
+class KelvinV2EncodingFixture : public ::testing::Test {
+ public:
+ void SetUp() override {
+ memory_ = std::make_unique<FlatDemandMemory>();
+ state_ = std::make_unique<KelvinV2State>("KelvinV2", RiscVXlen::RV32,
+ memory_.get());
+ encoding_ = std::make_unique<KelvinV2Encoding>(state_.get());
+ }
+
+ protected:
+ std::unique_ptr<KelvinV2State> state_;
+ std::unique_ptr<MemoryInterface> memory_;
+ std::unique_ptr<KelvinV2Encoding> encoding_;
+};
+
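+// Every source operand enum value should have a getter registered in the
+// encoding.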
+TEST_F(KelvinV2EncodingFixture, AllSourceOpsHaveGetters) {
+ for (int i = *SourceOpEnum::kNone; i < *SourceOpEnum::kPastMaxValue; i++) {
+ EXPECT_TRUE(encoding_->source_op_getters().contains(i))
+ << "No source operand for enum value " << i << " (" << kSourceOpNames[i]
+ << ")";
+ }
+}
+
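+// Same check for destination operand getters.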
+TEST_F(KelvinV2EncodingFixture, AllDestOpsHaveGetters) {
+ for (int i = *DestOpEnum::kNone; i < *DestOpEnum::kPastMaxValue; i++) {
+ EXPECT_TRUE(encoding_->dest_op_getters().contains(i))
+ << "No dest operand for enum value " << i << " (" << kDestOpNames[i]
+ << ")";
+ }
+}
+
+} // namespace