author     Android Build Coastguard Worker <android-build-coastguard-worker@google.com>  2024-03-04 21:55:10 +0000
committer  Android Build Coastguard Worker <android-build-coastguard-worker@google.com>  2024-03-04 21:55:10 +0000
commit     b8626a8f220f804e6a3ccddd112a84538f84ba1a (patch)
tree       68bce40c458fa12d77a066a6d7c1571226762ee8
parent     30412219cd7262d2238886f4c2cd7ce0a5674f5e (diff)
parent     c46c13c5bb056d82ad971dd3047d6c9782de7006 (diff)
download   art-simpleperf-release.tar.gz

Snap for 11526323 from c46c13c5bb056d82ad971dd3047d6c9782de7006 to simpleperf-release

Change-Id: If4ad6cac2e0952f348f43ab1507e9c2af6c20539
-rw-r--r--  .gitignore | 1
-rw-r--r--  artd/Android.bp | 19
-rw-r--r--  artd/art_standalone_artd_tests.xml | 2
-rw-r--r--  compiler/art_standalone_compiler_tests.xml | 2
-rw-r--r--  compiler/dex/inline_method_analyser.cc | 186
-rw-r--r--  compiler/dex/inline_method_analyser.h | 41
-rw-r--r--  compiler/optimizing/constant_folding.cc | 51
-rw-r--r--  compiler/optimizing/inliner.cc | 32
-rw-r--r--  compiler/optimizing/inliner.h | 1
-rw-r--r--  compiler/optimizing/intrinsics_riscv64.cc | 70
-rw-r--r--  compiler/utils/riscv64/assembler_riscv64.cc | 944
-rw-r--r--  compiler/utils/riscv64/assembler_riscv64.h | 132
-rw-r--r--  compiler/utils/riscv64/assembler_riscv64_test.cc | 138
-rw-r--r--  compiler/utils/riscv64/jni_macro_assembler_riscv64.cc | 24
-rw-r--r--  dex2oat/art_standalone_dex2oat_cts_tests.xml | 2
-rw-r--r--  dex2oat/art_standalone_dex2oat_tests.xml | 2
-rw-r--r--  dexoptanalyzer/art_standalone_dexoptanalyzer_tests.xml | 2
-rw-r--r--  libartservice/service/Android.bp | 9
-rw-r--r--  libartservice/service/java/com/android/server/art/ReasonMapping.java | 7
-rw-r--r--  libarttools/Android.bp | 7
-rw-r--r--  libdexfile/Android.bp | 2
-rw-r--r--  libdexfile/art_standalone_libdexfile_tests.xml | 2
-rw-r--r--  libprofile/art_standalone_libprofile_tests.xml | 2
-rw-r--r--  oatdump/art_standalone_oatdump_tests.xml | 2
-rw-r--r--  odrefresh/Android.bp | 8
-rw-r--r--  profman/art_standalone_profman_tests.xml | 2
-rw-r--r--  runtime/art_standalone_runtime_tests.xml | 2
-rw-r--r--  runtime/jit/jit.cc | 8
-rw-r--r--  runtime/thread-inl.h | 14
-rw-r--r--  runtime/thread.cc | 8
-rw-r--r--  runtime/thread.h | 34
-rw-r--r--  runtime/thread_list.cc | 13
-rw-r--r--  test/2243-checker-not-inline-into-throw/src/Main.java | 10
-rw-r--r--  test/442-checker-constant-folding/src/Main.java | 85
-rw-r--r--  test/476-checker-ctor-fence-redun-elim/src/Main.java | 9
-rw-r--r--  test/476-checker-ctor-memory-barrier/src/Main.java | 4
-rw-r--r--  test/569-checker-pattern-replacement/build.py | 20
-rw-r--r--  test/569-checker-pattern-replacement/src-multidex/Second.java | 5
-rw-r--r--  test/569-checker-pattern-replacement/src/Main.java | 27
-rw-r--r--  test/639-checker-code-sinking/src/Main.java | 6
-rw-r--r--  test/art-gtests-target-standalone-cts-template.xml | 2
-rw-r--r--  test/art-gtests-target-standalone-template.xml | 2
-rw-r--r--  test/art-gtests-target-standalone-with-boot-image-template.xml | 2
-rw-r--r--  test/dexpreopt/art_standalone_dexpreopt_tests.xml | 2
-rw-r--r--  test/knownfailures.json | 18
-rw-r--r--  test/testrunner/env.py | 7
-rw-r--r--  test/testrunner/ssh_config | 6
-rwxr-xr-x  test/testrunner/testrunner.py | 3
-rw-r--r--  tools/luci/config/generated/cr-buildbucket.cfg | 3
-rwxr-xr-x  tools/luci/config/main.star | 3
-rwxr-xr-x  tools/run-libcore-tests.py | 2
51 files changed, 1635 insertions(+), 350 deletions(-)
diff --git a/.gitignore b/.gitignore
index 803c297b07..dd264ef134 100644
--- a/.gitignore
+++ b/.gitignore
@@ -4,3 +4,4 @@ JIT_ART
**/*.iml
**/*.pyc
**/*.swn
+tools/boot.out
diff --git a/artd/Android.bp b/artd/Android.bp
index 8b74981e2f..50f5428435 100644
--- a/artd/Android.bp
+++ b/artd/Android.bp
@@ -35,11 +35,8 @@ cc_defaults {
"profman_headers",
],
shared_libs: [
- "libartservice",
- "libarttools", // Contains "libc++fs".
"libbase",
"libbinder_ndk",
- "libdexfile",
"libselinux",
],
static_libs: [
@@ -57,6 +54,9 @@ art_cc_binary {
shared_libs: [
"libart",
"libartbase",
+ "libartservice",
+ "libarttools", // Contains "libc++fs".
+ "libdexfile",
],
apex_available: [
"com.android.art",
@@ -93,6 +93,11 @@ art_cc_test {
"art_gtest_defaults",
"art_artd_tests_defaults",
],
+ shared_libs: [
+ "libartservice",
+ "libarttools",
+ "libdexfile",
+ ],
}
// Standalone version of ART gtest `art_artd_tests`, not bundled with the ART
@@ -103,6 +108,11 @@ art_cc_test {
"art_standalone_gtest_defaults",
"art_artd_tests_defaults",
],
+ static_libs: [
+ "libartservice",
+ "libarttools",
+ "libdexfile",
+ ],
test_config_template: "art_standalone_artd_tests.xml",
}
@@ -124,6 +134,9 @@ cc_fuzz {
shared_libs: [
"libart",
"libartbase",
+ "libartservice",
+ "libarttools",
+ "libdexfile",
"liblog",
],
fuzz_config: {
diff --git a/artd/art_standalone_artd_tests.xml b/artd/art_standalone_artd_tests.xml
index 27e7001218..a5c6c994b1 100644
--- a/artd/art_standalone_artd_tests.xml
+++ b/artd/art_standalone_artd_tests.xml
@@ -33,8 +33,6 @@
<test class="com.android.tradefed.testtype.GTest" >
<option name="native-test-device-path" value="/data/local/tmp/{MODULE}" />
<option name="module-name" value="{MODULE}" />
- <option name="ld-library-path-32" value="/apex/com.android.art/lib" />
- <option name="ld-library-path-64" value="/apex/com.android.art/lib64" />
</test>
<!-- When this test is run in a Mainline context (e.g. with `mts-tradefed`), only enable it if
diff --git a/compiler/art_standalone_compiler_tests.xml b/compiler/art_standalone_compiler_tests.xml
index 8e8636cca4..c2065dd766 100644
--- a/compiler/art_standalone_compiler_tests.xml
+++ b/compiler/art_standalone_compiler_tests.xml
@@ -47,8 +47,6 @@
<test class="com.android.tradefed.testtype.GTest" >
<option name="native-test-device-path" value="/data/local/tmp/art_standalone_compiler_tests" />
<option name="module-name" value="art_standalone_compiler_tests" />
- <option name="ld-library-path-32" value="/apex/com.android.art/lib" />
- <option name="ld-library-path-64" value="/apex/com.android.art/lib64" />
</test>
<!-- When this test is run in a Mainline context (e.g. with `mts-tradefed`), only enable it if
diff --git a/compiler/dex/inline_method_analyser.cc b/compiler/dex/inline_method_analyser.cc
index 381db3d21d..91944b01c0 100644
--- a/compiler/dex/inline_method_analyser.cc
+++ b/compiler/dex/inline_method_analyser.cc
@@ -152,8 +152,7 @@ ArtMethod* GetTargetConstructor(ArtMethod* method, const Instruction* invoke_dir
if (kIsDebugBuild && target_method != nullptr) {
CHECK(!target_method->IsStatic());
CHECK(target_method->IsConstructor());
- CHECK(target_method->GetDeclaringClass() == method->GetDeclaringClass() ||
- target_method->GetDeclaringClass() == method->GetDeclaringClass()->GetSuperClass());
+ CHECK(method->GetDeclaringClass()->IsSubClass(target_method->GetDeclaringClass()));
}
return target_method;
}
@@ -256,11 +255,11 @@ bool DoAnalyseConstructor(const CodeItemDataAccessor* code_item,
/*inout*/ ConstructorIPutData (&iputs)[kMaxConstructorIPuts])
REQUIRES_SHARED(Locks::mutator_lock_) {
// On entry we should not have any IPUTs yet.
- DCHECK_EQ(0, std::count_if(
+ DCHECK(std::all_of(
iputs,
iputs + arraysize(iputs),
[](const ConstructorIPutData& iput_data) {
- return iput_data.field_index != DexFile::kDexNoIndex16;
+ return iput_data.field_index == DexFile::kDexNoIndex16;
}));
// Limit the maximum number of code units we're willing to match.
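The assertion rewrite in this hunk is behavior-preserving: counting the entries whose `field_index` is already set and requiring that count to be zero is the same condition as requiring that every entry still holds the unset sentinel. A minimal standalone sketch of the equivalence (illustrative types only; in ART, `DexFile::kDexNoIndex16` is the 0xffff sentinel):

    #include <algorithm>
    #include <cassert>
    #include <cstdint>
    #include <iterator>

    constexpr uint16_t kDexNoIndex16 = 0xffffu;  // "unset" sentinel, as in DexFile

    struct ConstructorIPutData {
      uint16_t field_index = kDexNoIndex16;
    };

    void CheckNoIPutsYet(const ConstructorIPutData (&iputs)[3]) {
      // Old form: zero entries have a field index filled in.
      assert(0 == std::count_if(std::begin(iputs), std::end(iputs),
                                [](const ConstructorIPutData& d) {
                                  return d.field_index != kDexNoIndex16;
                                }));
      // New form: every entry is still unset -- the same condition, stated directly.
      assert(std::all_of(std::begin(iputs), std::end(iputs),
                         [](const ConstructorIPutData& d) {
                           return d.field_index == kDexNoIndex16;
                         }));
    }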
@@ -396,56 +395,36 @@ bool AnalyseConstructor(const CodeItemDataAccessor* code_item,
return true;
}
-static_assert(InlineMethodAnalyser::IsInstructionIGet(Instruction::IGET), "iget type");
-static_assert(InlineMethodAnalyser::IsInstructionIGet(Instruction::IGET_WIDE), "iget_wide type");
-static_assert(InlineMethodAnalyser::IsInstructionIGet(Instruction::IGET_OBJECT),
- "iget_object type");
-static_assert(InlineMethodAnalyser::IsInstructionIGet(Instruction::IGET_BOOLEAN),
- "iget_boolean type");
-static_assert(InlineMethodAnalyser::IsInstructionIGet(Instruction::IGET_BYTE), "iget_byte type");
-static_assert(InlineMethodAnalyser::IsInstructionIGet(Instruction::IGET_CHAR), "iget_char type");
-static_assert(InlineMethodAnalyser::IsInstructionIGet(Instruction::IGET_SHORT), "iget_short type");
-static_assert(InlineMethodAnalyser::IsInstructionIPut(Instruction::IPUT), "iput type");
-static_assert(InlineMethodAnalyser::IsInstructionIPut(Instruction::IPUT_WIDE), "iput_wide type");
-static_assert(InlineMethodAnalyser::IsInstructionIPut(Instruction::IPUT_OBJECT),
- "iput_object type");
-static_assert(InlineMethodAnalyser::IsInstructionIPut(Instruction::IPUT_BOOLEAN),
- "iput_boolean type");
-static_assert(InlineMethodAnalyser::IsInstructionIPut(Instruction::IPUT_BYTE), "iput_byte type");
-static_assert(InlineMethodAnalyser::IsInstructionIPut(Instruction::IPUT_CHAR), "iput_char type");
-static_assert(InlineMethodAnalyser::IsInstructionIPut(Instruction::IPUT_SHORT), "iput_short type");
-static_assert(InlineMethodAnalyser::IGetVariant(Instruction::IGET) ==
- InlineMethodAnalyser::IPutVariant(Instruction::IPUT), "iget/iput variant");
-static_assert(InlineMethodAnalyser::IGetVariant(Instruction::IGET_WIDE) ==
- InlineMethodAnalyser::IPutVariant(Instruction::IPUT_WIDE), "iget/iput_wide variant");
-static_assert(InlineMethodAnalyser::IGetVariant(Instruction::IGET_OBJECT) ==
- InlineMethodAnalyser::IPutVariant(Instruction::IPUT_OBJECT), "iget/iput_object variant");
-static_assert(InlineMethodAnalyser::IGetVariant(Instruction::IGET_BOOLEAN) ==
- InlineMethodAnalyser::IPutVariant(Instruction::IPUT_BOOLEAN), "iget/iput_boolean variant");
-static_assert(InlineMethodAnalyser::IGetVariant(Instruction::IGET_BYTE) ==
- InlineMethodAnalyser::IPutVariant(Instruction::IPUT_BYTE), "iget/iput_byte variant");
-static_assert(InlineMethodAnalyser::IGetVariant(Instruction::IGET_CHAR) ==
- InlineMethodAnalyser::IPutVariant(Instruction::IPUT_CHAR), "iget/iput_char variant");
-static_assert(InlineMethodAnalyser::IGetVariant(Instruction::IGET_SHORT) ==
- InlineMethodAnalyser::IPutVariant(Instruction::IPUT_SHORT), "iget/iput_short variant");
-
-bool InlineMethodAnalyser::AnalyseMethodCode(ArtMethod* method, InlineMethod* result) {
- CodeItemDataAccessor code_item(method->DexInstructionData());
- if (!code_item.HasCodeItem()) {
- // Native or abstract.
- return false;
- }
- return AnalyseMethodCode(&code_item,
- MethodReference(method->GetDexFile(), method->GetDexMethodIndex()),
- method->IsStatic(),
- method,
- result);
-}
-
-bool InlineMethodAnalyser::AnalyseMethodCode(const CodeItemDataAccessor* code_item,
- const MethodReference& method_ref,
- bool is_static,
- ArtMethod* method,
+static_assert(IsInstructionIGet(Instruction::IGET));
+static_assert(IsInstructionIGet(Instruction::IGET_WIDE));
+static_assert(IsInstructionIGet(Instruction::IGET_OBJECT));
+static_assert(IsInstructionIGet(Instruction::IGET_BOOLEAN));
+static_assert(IsInstructionIGet(Instruction::IGET_BYTE));
+static_assert(IsInstructionIGet(Instruction::IGET_CHAR));
+static_assert(IsInstructionIGet(Instruction::IGET_SHORT));
+static_assert(IsInstructionIPut(Instruction::IPUT));
+static_assert(IsInstructionIPut(Instruction::IPUT_WIDE));
+static_assert(IsInstructionIPut(Instruction::IPUT_OBJECT));
+static_assert(IsInstructionIPut(Instruction::IPUT_BOOLEAN));
+static_assert(IsInstructionIPut(Instruction::IPUT_BYTE));
+static_assert(IsInstructionIPut(Instruction::IPUT_CHAR));
+static_assert(IsInstructionIPut(Instruction::IPUT_SHORT));
+static_assert(IGetMemAccessType(Instruction::IGET) == IPutMemAccessType(Instruction::IPUT));
+static_assert(
+ IGetMemAccessType(Instruction::IGET_WIDE) == IPutMemAccessType(Instruction::IPUT_WIDE));
+static_assert(
+ IGetMemAccessType(Instruction::IGET_OBJECT) == IPutMemAccessType(Instruction::IPUT_OBJECT));
+static_assert(
+ IGetMemAccessType(Instruction::IGET_BOOLEAN) == IPutMemAccessType(Instruction::IPUT_BOOLEAN));
+static_assert(
+ IGetMemAccessType(Instruction::IGET_BYTE) == IPutMemAccessType(Instruction::IPUT_BYTE));
+static_assert(
+ IGetMemAccessType(Instruction::IGET_CHAR) == IPutMemAccessType(Instruction::IPUT_CHAR));
+static_assert(
+ IGetMemAccessType(Instruction::IGET_SHORT) == IPutMemAccessType(Instruction::IPUT_SHORT));
+
+bool InlineMethodAnalyser::AnalyseMethodCode(ArtMethod* method,
+ const CodeItemDataAccessor* code_item,
InlineMethod* result) {
// We currently support only plain return or 2-instruction methods.
@@ -488,11 +467,7 @@ bool InlineMethodAnalyser::AnalyseMethodCode(const CodeItemDataAccessor* code_it
case Instruction::IGET_CHAR:
case Instruction::IGET_SHORT:
case Instruction::IGET_WIDE:
- // TODO: Add handling for JIT.
- // case Instruction::IGET_QUICK:
- // case Instruction::IGET_WIDE_QUICK:
- // case Instruction::IGET_OBJECT_QUICK:
- return AnalyseIGetMethod(code_item, method_ref, is_static, method, result);
+ return AnalyseIGetMethod(method, code_item, result);
case Instruction::IPUT:
case Instruction::IPUT_OBJECT:
case Instruction::IPUT_BOOLEAN:
@@ -500,19 +475,16 @@ bool InlineMethodAnalyser::AnalyseMethodCode(const CodeItemDataAccessor* code_it
case Instruction::IPUT_CHAR:
case Instruction::IPUT_SHORT:
case Instruction::IPUT_WIDE:
- // TODO: Add handling for JIT.
- // case Instruction::IPUT_QUICK:
- // case Instruction::IPUT_WIDE_QUICK:
- // case Instruction::IPUT_OBJECT_QUICK:
- return AnalyseIPutMethod(code_item, method_ref, is_static, method, result);
+ return AnalyseIPutMethod(method, code_item, result);
default:
return false;
}
}
-bool InlineMethodAnalyser::IsSyntheticAccessor(MethodReference ref) {
- const dex::MethodId& method_id = ref.dex_file->GetMethodId(ref.index);
- const char* method_name = ref.dex_file->GetMethodName(method_id);
+bool InlineMethodAnalyser::IsSyntheticAccessor(ArtMethod* method) {
+ const DexFile* dex_file = method->GetDexFile();
+ const dex::MethodId& method_id = dex_file->GetMethodId(method->GetDexMethodIndex());
+ const char* method_name = dex_file->GetMethodName(method_id);
// javac names synthetic accessors "access$nnn",
// jack names them "-getN", "-putN", "-wrapN".
return strncmp(method_name, "access$", strlen("access$")) == 0 ||
@@ -572,10 +544,8 @@ bool InlineMethodAnalyser::AnalyseConstMethod(const CodeItemDataAccessor* code_i
return true;
}
-bool InlineMethodAnalyser::AnalyseIGetMethod(const CodeItemDataAccessor* code_item,
- const MethodReference& method_ref,
- bool is_static,
- ArtMethod* method,
+bool InlineMethodAnalyser::AnalyseIGetMethod(ArtMethod* method,
+ const CodeItemDataAccessor* code_item,
InlineMethod* result) {
DexInstructionIterator instruction = code_item->begin();
Instruction::Code opcode = instruction->Opcode();
@@ -607,39 +577,37 @@ bool InlineMethodAnalyser::AnalyseIGetMethod(const CodeItemDataAccessor* code_it
return false; // Not returning the value retrieved by IGET?
}
- if (is_static || object_arg != 0u) {
- // TODO: Implement inlining of IGET on non-"this" registers (needs correct stack trace for NPE).
- // Allow synthetic accessors. We don't care about losing their stack frame in NPE.
- if (!IsSyntheticAccessor(method_ref)) {
- return false;
- }
- }
-
// InlineIGetIPutData::object_arg is only 4 bits wide.
static constexpr uint16_t kMaxObjectArg = 15u;
if (object_arg > kMaxObjectArg) {
return false;
}
- if (result != nullptr) {
- InlineIGetIPutData* data = &result->d.ifield_data;
- if (!ComputeSpecialAccessorInfo(method, field_idx, false, data)) {
+ bool is_static = method->IsStatic();
+ if (is_static || object_arg != 0u) {
+ // TODO: Implement inlining of IGET on non-"this" registers (needs correct stack trace for NPE).
+ // Allow synthetic accessors. We don't care about losing their stack frame in NPE.
+ if (!IsSyntheticAccessor(method)) {
return false;
}
- result->opcode = kInlineOpIGet;
- data->op_variant = IGetVariant(opcode);
- data->method_is_static = is_static ? 1u : 0u;
- data->object_arg = object_arg; // Allow IGET on any register, not just "this".
- data->src_arg = 0u;
- data->return_arg_plus1 = 0u;
}
+
+ DCHECK(result != nullptr);
+ InlineIGetIPutData* data = &result->d.ifield_data;
+ if (!ComputeSpecialAccessorInfo(method, field_idx, false, data)) {
+ return false;
+ }
+ result->opcode = kInlineOpIGet;
+ data->op_variant = enum_cast<uint16_t>(IGetMemAccessType(opcode));
+ data->method_is_static = is_static ? 1u : 0u;
+ data->object_arg = object_arg; // Allow IGET on any register, not just "this".
+ data->src_arg = 0u;
+ data->return_arg_plus1 = 0u;
return true;
}
-bool InlineMethodAnalyser::AnalyseIPutMethod(const CodeItemDataAccessor* code_item,
- const MethodReference& method_ref,
- bool is_static,
- ArtMethod* method,
+bool InlineMethodAnalyser::AnalyseIPutMethod(ArtMethod* method,
+ const CodeItemDataAccessor* code_item,
InlineMethod* result) {
DexInstructionIterator instruction = code_item->begin();
Instruction::Code opcode = instruction->Opcode();
@@ -673,14 +641,6 @@ bool InlineMethodAnalyser::AnalyseIPutMethod(const CodeItemDataAccessor* code_it
uint32_t object_arg = object_reg - arg_start;
uint32_t src_arg = src_reg - arg_start;
- if (is_static || object_arg != 0u) {
- // TODO: Implement inlining of IPUT on non-"this" registers (needs correct stack trace for NPE).
- // Allow synthetic accessors. We don't care about losing their stack frame in NPE.
- if (!IsSyntheticAccessor(method_ref)) {
- return false;
- }
- }
-
// InlineIGetIPutData::object_arg/src_arg/return_arg_plus1 are each only 4 bits wide.
static constexpr uint16_t kMaxObjectArg = 15u;
static constexpr uint16_t kMaxSrcArg = 15u;
@@ -689,18 +649,26 @@ bool InlineMethodAnalyser::AnalyseIPutMethod(const CodeItemDataAccessor* code_it
return false;
}
- if (result != nullptr) {
- InlineIGetIPutData* data = &result->d.ifield_data;
- if (!ComputeSpecialAccessorInfo(method, field_idx, true, data)) {
+ bool is_static = method->IsStatic();
+ if (is_static || object_arg != 0u) {
+ // TODO: Implement inlining of IPUT on non-"this" registers (needs correct stack trace for NPE).
+ // Allow synthetic accessors. We don't care about losing their stack frame in NPE.
+ if (!IsSyntheticAccessor(method)) {
return false;
}
- result->opcode = kInlineOpIPut;
- data->op_variant = IPutVariant(opcode);
- data->method_is_static = is_static ? 1u : 0u;
- data->object_arg = object_arg; // Allow IPUT on any register, not just "this".
- data->src_arg = src_arg;
- data->return_arg_plus1 = return_arg_plus1;
}
+
+ DCHECK(result != nullptr);
+ InlineIGetIPutData* data = &result->d.ifield_data;
+ if (!ComputeSpecialAccessorInfo(method, field_idx, true, data)) {
+ return false;
+ }
+ result->opcode = kInlineOpIPut;
+ data->op_variant = enum_cast<uint16_t>(IPutMemAccessType(opcode));
+ data->method_is_static = is_static ? 1u : 0u;
+ data->object_arg = object_arg; // Allow IPUT on any register, not just "this".
+ data->src_arg = src_arg;
+ data->return_arg_plus1 = return_arg_plus1;
return true;
}
diff --git a/compiler/dex/inline_method_analyser.h b/compiler/dex/inline_method_analyser.h
index 99d07c6152..4cd5b824f1 100644
--- a/compiler/dex/inline_method_analyser.h
+++ b/compiler/dex/inline_method_analyser.h
@@ -21,7 +21,6 @@
#include "base/mutex.h"
#include "dex/dex_file.h"
#include "dex/dex_instruction.h"
-#include "dex/method_reference.h"
/*
* NOTE: This code is part of the quick compiler. It lives in the runtime
@@ -100,47 +99,23 @@ class InlineMethodAnalyser {
*
* @return true if the method is a candidate for inlining, false otherwise.
*/
- static bool AnalyseMethodCode(ArtMethod* method, InlineMethod* result)
+ static bool AnalyseMethodCode(ArtMethod* method,
+ const CodeItemDataAccessor* code_item,
+ InlineMethod* result)
REQUIRES_SHARED(Locks::mutator_lock_);
- static constexpr bool IsInstructionIGet(Instruction::Code opcode) {
- return Instruction::IGET <= opcode && opcode <= Instruction::IGET_SHORT;
- }
-
- static constexpr bool IsInstructionIPut(Instruction::Code opcode) {
- return Instruction::IPUT <= opcode && opcode <= Instruction::IPUT_SHORT;
- }
-
- static constexpr uint16_t IGetVariant(Instruction::Code opcode) {
- return opcode - Instruction::IGET;
- }
-
- static constexpr uint16_t IPutVariant(Instruction::Code opcode) {
- return opcode - Instruction::IPUT;
- }
-
// Determines whether the method is a synthetic accessor (method name starts with "access$").
- static bool IsSyntheticAccessor(MethodReference ref);
+ static bool IsSyntheticAccessor(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);
private:
- static bool AnalyseMethodCode(const CodeItemDataAccessor* code_item,
- const MethodReference& method_ref,
- bool is_static,
- ArtMethod* method,
- InlineMethod* result)
- REQUIRES_SHARED(Locks::mutator_lock_);
static bool AnalyseReturnMethod(const CodeItemDataAccessor* code_item, InlineMethod* result);
static bool AnalyseConstMethod(const CodeItemDataAccessor* code_item, InlineMethod* result);
- static bool AnalyseIGetMethod(const CodeItemDataAccessor* code_item,
- const MethodReference& method_ref,
- bool is_static,
- ArtMethod* method,
+ static bool AnalyseIGetMethod(ArtMethod* method,
+ const CodeItemDataAccessor* code_item,
InlineMethod* result)
REQUIRES_SHARED(Locks::mutator_lock_);
- static bool AnalyseIPutMethod(const CodeItemDataAccessor* code_item,
- const MethodReference& method_ref,
- bool is_static,
- ArtMethod* method,
+ static bool AnalyseIPutMethod(ArtMethod* method,
+ const CodeItemDataAccessor* code_item,
InlineMethod* result)
REQUIRES_SHARED(Locks::mutator_lock_);
diff --git a/compiler/optimizing/constant_folding.cc b/compiler/optimizing/constant_folding.cc
index f57d8ade16..52cbfe8322 100644
--- a/compiler/optimizing/constant_folding.cc
+++ b/compiler/optimizing/constant_folding.cc
@@ -876,18 +876,35 @@ void InstructionWithAbsorbingInputSimplifier::VisitMul(HMul* instruction) {
void InstructionWithAbsorbingInputSimplifier::VisitOr(HOr* instruction) {
HConstant* input_cst = instruction->GetConstantRight();
-
- if (input_cst == nullptr) {
- return;
- }
-
- if (Int64FromConstant(input_cst) == -1) {
+ if (input_cst != nullptr && Int64FromConstant(input_cst) == -1) {
// Replace code looking like
// OR dst, src, 0xFFF...FF
// with
// CONSTANT 0xFFF...FF
instruction->ReplaceWith(input_cst);
instruction->GetBlock()->RemoveInstruction(instruction);
+ return;
+ }
+
+ HInstruction* left = instruction->GetLeft();
+ HInstruction* right = instruction->GetRight();
+ if (left->IsNot() ^ right->IsNot()) {
+ // Replace code looking like
+ // NOT notsrc, src
+ // OR dst, notsrc, src
+ // with
+ // CONSTANT 0xFFF...FF
+ HInstruction* hnot = (left->IsNot() ? left : right);
+ HInstruction* hother = (left->IsNot() ? right : left);
+ HInstruction* src = hnot->AsNot()->GetInput();
+
+ if (src == hother) {
+ DCHECK(instruction->GetType() == DataType::Type::kInt32 ||
+ instruction->GetType() == DataType::Type::kInt64);
+ instruction->ReplaceWith(GetGraph()->GetConstant(instruction->GetType(), -1));
+ instruction->GetBlock()->RemoveInstruction(instruction);
+ return;
+ }
}
}
@@ -974,6 +991,28 @@ void InstructionWithAbsorbingInputSimplifier::VisitXor(HXor* instruction) {
HBasicBlock* block = instruction->GetBlock();
instruction->ReplaceWith(GetGraph()->GetConstant(type, 0));
block->RemoveInstruction(instruction);
+ return;
+ }
+
+ HInstruction* left = instruction->GetLeft();
+ HInstruction* right = instruction->GetRight();
+ if (left->IsNot() ^ right->IsNot()) {
+ // Replace code looking like
+ // NOT notsrc, src
+ // XOR dst, notsrc, src
+ // with
+ // CONSTANT 0xFFF...FF
+ HInstruction* hnot = (left->IsNot() ? left : right);
+ HInstruction* hother = (left->IsNot() ? right : left);
+ HInstruction* src = hnot->AsNot()->GetInput();
+
+ if (src == hother) {
+ DCHECK(instruction->GetType() == DataType::Type::kInt32 ||
+ instruction->GetType() == DataType::Type::kInt64);
+ instruction->ReplaceWith(GetGraph()->GetConstant(instruction->GetType(), -1));
+ instruction->GetBlock()->RemoveInstruction(instruction);
+ return;
+ }
}
}
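Both new folding rules in this file hinge on the same two's-complement identity: combining a value with its bitwise NOT via OR or XOR sets every bit, i.e. yields the constant -1. The `left->IsNot() ^ right->IsNot()` guard is a boolean XOR that fires exactly when one (and only one) operand is a NOT. A quick standalone check of the identities (a sketch, not ART code):

    #include <cassert>
    #include <cstdint>

    int main() {
      for (int32_t x : {0, 1, -1, 42, INT32_MIN}) {
        assert((x | ~x) == -1);  // OR  dst, notsrc, src  ->  CONSTANT 0xFFF...FF
        assert((x ^ ~x) == -1);  // XOR dst, notsrc, src  ->  CONSTANT 0xFFF...FF
      }
      return 0;
    }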
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index d7ca17b646..f1e2733f3e 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -1619,17 +1619,28 @@ bool HInliner::TryBuildAndInline(HInvoke* invoke_instruction,
return true;
}
+ CodeItemDataAccessor accessor(method->DexInstructionData());
+
+ if (!IsInliningAllowed(method, accessor)) {
+ return false;
+ }
+
+ // We have checked above that inlining is "allowed": the method has bytecode (it is not
+ // native), it is compilable and verified, and the @NeverInline annotation is respected.
+ // However, the pattern substitution is always preferable, so we do it before checking
+ // whether inlining is "encouraged"; it is also exempt from the `MayInline()` restriction.
+ if (TryPatternSubstitution(invoke_instruction, method, accessor, return_replacement)) {
+ LOG_SUCCESS() << "Successfully replaced pattern of invoke "
+ << method->PrettyMethod();
+ MaybeRecordStat(stats_, MethodCompilationStat::kReplacedInvokeWithSimplePattern);
+ return true;
+ }
+
// Check whether we're allowed to inline. The outermost compilation unit is the relevant
// dex file here (though the transitivity of an inline chain would allow checking the caller).
if (!MayInline(codegen_->GetCompilerOptions(),
*method->GetDexFile(),
*outer_compilation_unit_.GetDexFile())) {
- if (TryPatternSubstitution(invoke_instruction, method, return_replacement)) {
- LOG_SUCCESS() << "Successfully replaced pattern of invoke "
- << method->PrettyMethod();
- MaybeRecordStat(stats_, MethodCompilationStat::kReplacedInvokeWithSimplePattern);
- return true;
- }
LOG_FAIL(stats_, MethodCompilationStat::kNotInlinedWont)
<< "Won't inline " << method->PrettyMethod() << " in "
<< outer_compilation_unit_.GetDexFile()->GetLocation() << " ("
@@ -1638,12 +1649,6 @@ bool HInliner::TryBuildAndInline(HInvoke* invoke_instruction,
return false;
}
- CodeItemDataAccessor accessor(method->DexInstructionData());
-
- if (!IsInliningAllowed(method, accessor)) {
- return false;
- }
-
if (!IsInliningSupported(invoke_instruction, method, accessor)) {
return false;
}
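For orientation, the reordering above gives `TryBuildAndInline()` roughly this shape (a simplified outline, not the complete method):

    // 1. IsInliningAllowed(method, accessor): the method has bytecode (is not
    //    native), is compilable and verified, and is not marked @NeverInline.
    // 2. TryPatternSubstitution(...): tried next because it is always preferable,
    //    and it is exempt from the MayInline() policy below.
    // 3. MayInline(...): the cross-dex-file inlining policy from the compiler options.
    // 4. IsInliningSupported(...) and the remaining checks, then actual inlining.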
@@ -1683,9 +1688,10 @@ static HInstruction* GetInvokeInputForArgVRegIndex(HInvoke* invoke_instruction,
// Try to recognize known simple patterns and replace invoke call with appropriate instructions.
bool HInliner::TryPatternSubstitution(HInvoke* invoke_instruction,
ArtMethod* method,
+ const CodeItemDataAccessor& accessor,
HInstruction** return_replacement) {
InlineMethod inline_method;
- if (!InlineMethodAnalyser::AnalyseMethodCode(method, &inline_method)) {
+ if (!InlineMethodAnalyser::AnalyseMethodCode(method, &accessor, &inline_method)) {
return false;
}
diff --git a/compiler/optimizing/inliner.h b/compiler/optimizing/inliner.h
index 48600543c6..57d3364051 100644
--- a/compiler/optimizing/inliner.h
+++ b/compiler/optimizing/inliner.h
@@ -126,6 +126,7 @@ class HInliner : public HOptimization {
// Try to recognize known simple patterns and replace invoke call with appropriate instructions.
bool TryPatternSubstitution(HInvoke* invoke_instruction,
ArtMethod* method,
+ const CodeItemDataAccessor& accessor,
HInstruction** return_replacement)
REQUIRES_SHARED(Locks::mutator_lock_);
diff --git a/compiler/optimizing/intrinsics_riscv64.cc b/compiler/optimizing/intrinsics_riscv64.cc
index 698c7e5157..7fdb015e56 100644
--- a/compiler/optimizing/intrinsics_riscv64.cc
+++ b/compiler/optimizing/intrinsics_riscv64.cc
@@ -1139,25 +1139,28 @@ static void GenerateCompareAndSet(Riscv64Assembler* assembler,
}
EmitLoadReserved(assembler, type, ptr, old_value, load_aqrl);
XRegister to_store = new_value;
- if (mask != kNoXRegister) {
- DCHECK_EQ(expected2, kNoXRegister);
- DCHECK_NE(masked, kNoXRegister);
- __ And(masked, old_value, mask);
- __ Bne(masked, expected, cmp_failure);
- // The `old_value` does not need to be preserved as the caller shall use `masked`
- // to return the old value if needed.
- to_store = old_value;
- // TODO(riscv64): We could XOR the old and new value before the loop and use a single XOR here
- // instead of the XOR+OR. (The `new_value` is either Zero or a temporary we can clobber.)
- __ Xor(to_store, old_value, masked);
- __ Or(to_store, to_store, new_value);
- } else if (expected2 != kNoXRegister) {
- Riscv64Label match2;
- __ Beq(old_value, expected2, &match2, /*is_bare=*/ true);
- __ Bne(old_value, expected, cmp_failure);
- __ Bind(&match2);
- } else {
- __ Bne(old_value, expected, cmp_failure);
+ {
+ ScopedLrScExtensionsRestriction slser(assembler);
+ if (mask != kNoXRegister) {
+ DCHECK_EQ(expected2, kNoXRegister);
+ DCHECK_NE(masked, kNoXRegister);
+ __ And(masked, old_value, mask);
+ __ Bne(masked, expected, cmp_failure);
+ // The `old_value` does not need to be preserved as the caller shall use `masked`
+ // to return the old value if needed.
+ to_store = old_value;
+ // TODO(riscv64): We could XOR the old and new value before the loop and use a single XOR here
+ // instead of the XOR+OR. (The `new_value` is either Zero or a temporary we can clobber.)
+ __ Xor(to_store, old_value, masked);
+ __ Or(to_store, to_store, new_value);
+ } else if (expected2 != kNoXRegister) {
+ Riscv64Label match2;
+ __ Beq(old_value, expected2, &match2, /*is_bare=*/ true);
+ __ Bne(old_value, expected, cmp_failure);
+ __ Bind(&match2);
+ } else {
+ __ Bne(old_value, expected, cmp_failure);
+ }
}
EmitStoreConditional(assembler, type, ptr, store_result, to_store, store_aqrl);
if (strong) {
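The `Xor`+`Or` pair in the masked path above is a branch-free field merge: since `masked == old_value & mask`, `old_value ^ masked` equals `old_value & ~mask` (the field cleared) without needing ANDN, which is outside the base "I" set permitted inside an LR/SC sequence; the `Or` then inserts the new bits. A standalone sketch of the same arithmetic (illustrative names, not ART code):

    #include <cassert>
    #include <cstdint>

    // Merge `new_bits` (already shifted into position and confined to `mask`)
    // into the field of `old` selected by `mask`.
    uint32_t MergeField(uint32_t old, uint32_t mask, uint32_t new_bits) {
      uint32_t masked = old & mask;     // current contents of the field
      uint32_t cleared = old ^ masked;  // == old & ~mask, field zeroed
      return cleared | new_bits;        // insert the new field value
    }

    int main() {
      assert(MergeField(0x12345678u, 0x00ff0000u, 0x00ab0000u) == 0x12ab5678u);
      return 0;
    }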
@@ -1826,8 +1829,11 @@ static void GenerateGetAndUpdate(CodeGeneratorRISCV64* codegen,
Riscv64Label retry;
__ Bind(&retry);
__ LrW(old_value, ptr, load_aqrl);
- __ And(temp, old_value, mask);
- __ Or(temp, temp, arg);
+ {
+ ScopedLrScExtensionsRestriction slser(assembler);
+ __ And(temp, old_value, mask);
+ __ Or(temp, temp, arg);
+ }
__ ScW(temp, temp, ptr, store_aqrl);
__ Bnez(temp, &retry, /*is_bare=*/ true); // Bare: `TMP` shall not be clobbered.
}
@@ -1845,15 +1851,19 @@ static void GenerateGetAndUpdate(CodeGeneratorRISCV64* codegen,
Riscv64Label retry;
__ Bind(&retry);
__ LrW(old_value, ptr, load_aqrl);
- __ Add(temp, old_value, arg);
- // We use `(A ^ B) ^ A == B` and with the masking `((A ^ B) & mask) ^ A`, the result
- // contains bits from `B` for bits specified in `mask` and bits from `A` elsewhere.
- // Note: These instructions directly depend on each other, so it's not necessarily the
- // fastest approach but for `(A ^ ~mask) | (B & mask)` we would need an extra register for
- // `~mask` because ANDN is not in the "I" instruction set as required for a LR/SC sequence.
- __ Xor(temp, temp, old_value);
- __ And(temp, temp, mask);
- __ Xor(temp, temp, old_value);
+ {
+ ScopedLrScExtensionsRestriction slser(assembler);
+ __ Add(temp, old_value, arg);
+ // We use `(A ^ B) ^ A == B` and with the masking `((A ^ B) & mask) ^ A`, the result
+ // contains bits from `B` for bits specified in `mask` and bits from `A` elsewhere.
+ // Note: These instructions directly depend on each other, so it's not necessarily the
+ // fastest approach but for `(A ^ ~mask) | (B & mask)` we would need an extra register
+ // for `~mask` because ANDN is not in the "I" instruction set as required for a LR/SC
+ // sequence.
+ __ Xor(temp, temp, old_value);
+ __ And(temp, temp, mask);
+ __ Xor(temp, temp, old_value);
+ }
__ ScW(temp, temp, ptr, store_aqrl);
__ Bnez(temp, &retry, /*is_bare=*/ true); // Bare: `TMP` shall not be clobbered.
}
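The comment above relies on `(A ^ B) ^ A == B`: with the mask applied in between, `((A ^ B) & mask) ^ A` selects `B`'s bits where the mask is set and `A`'s bits elsewhere, again using only XOR/AND from the base "I" set. A standalone sketch (illustrative names, not ART code):

    #include <cassert>
    #include <cstdint>

    // Bits from `b` where `mask` is set, bits from `a` elsewhere.
    uint32_t SelectByMask(uint32_t a, uint32_t b, uint32_t mask) {
      return ((a ^ b) & mask) ^ a;
    }

    int main() {
      assert(SelectByMask(0xaaaaaaaau, 0x55555555u, 0x0000ffffu) == 0xaaaa5555u);
      assert(SelectByMask(0xaaaaaaaau, 0x55555555u, 0u) == 0xaaaaaaaau);   // empty mask: all A
      assert(SelectByMask(0xaaaaaaaau, 0x55555555u, ~0u) == 0x55555555u);  // full mask: all B
      return 0;
    }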
diff --git a/compiler/utils/riscv64/assembler_riscv64.cc b/compiler/utils/riscv64/assembler_riscv64.cc
index eeb4537a31..09778add1e 100644
--- a/compiler/utils/riscv64/assembler_riscv64.cc
+++ b/compiler/utils/riscv64/assembler_riscv64.cc
@@ -114,48 +114,59 @@ void Riscv64Assembler::Bgeu(XRegister rs1, XRegister rs2, int32_t offset) {
// Load instructions (RV32I+RV64I): opcode = 0x03, funct3 from 0x0 ~ 0x6
void Riscv64Assembler::Lb(XRegister rd, XRegister rs1, int32_t offset) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore);
EmitI(offset, rs1, 0x0, rd, 0x03);
}
void Riscv64Assembler::Lh(XRegister rd, XRegister rs1, int32_t offset) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore);
EmitI(offset, rs1, 0x1, rd, 0x03);
}
void Riscv64Assembler::Lw(XRegister rd, XRegister rs1, int32_t offset) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore);
EmitI(offset, rs1, 0x2, rd, 0x03);
}
void Riscv64Assembler::Ld(XRegister rd, XRegister rs1, int32_t offset) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore);
EmitI(offset, rs1, 0x3, rd, 0x03);
}
void Riscv64Assembler::Lbu(XRegister rd, XRegister rs1, int32_t offset) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore);
EmitI(offset, rs1, 0x4, rd, 0x03);
}
void Riscv64Assembler::Lhu(XRegister rd, XRegister rs1, int32_t offset) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore);
EmitI(offset, rs1, 0x5, rd, 0x03);
}
void Riscv64Assembler::Lwu(XRegister rd, XRegister rs1, int32_t offset) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore);
EmitI(offset, rs1, 0x6, rd, 0x3);
}
// Store instructions (RV32I+RV64I): opcode = 0x23, funct3 from 0x0 ~ 0x3
void Riscv64Assembler::Sb(XRegister rs2, XRegister rs1, int32_t offset) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore);
EmitS(offset, rs2, rs1, 0x0, 0x23);
}
void Riscv64Assembler::Sh(XRegister rs2, XRegister rs1, int32_t offset) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore);
EmitS(offset, rs2, rs1, 0x1, 0x23);
}
void Riscv64Assembler::Sw(XRegister rs2, XRegister rs1, int32_t offset) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore);
EmitS(offset, rs2, rs1, 0x2, 0x23);
}
void Riscv64Assembler::Sd(XRegister rs2, XRegister rs1, int32_t offset) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore);
EmitS(offset, rs2, rs1, 0x3, 0x23);
}
@@ -313,7 +324,10 @@ void Riscv64Assembler::FenceTso() {
/////////////////////////// RV64 "Zifencei" Instructions START ////////////////////////////
// "Zifencei" Standard Extension, opcode = 0xf, funct3 = 1
-void Riscv64Assembler::FenceI() { EmitI(0x0, 0x0, 0x1, 0x0, 0xf); }
+void Riscv64Assembler::FenceI() {
+ AssertExtensionsEnabled(Riscv64Extension::kZifencei);
+ EmitI(0x0, 0x0, 0x1, 0x0, 0xf);
+}
//////////////////////////// RV64 "Zifencei" Instructions END /////////////////////////////
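From here on, every emitter opens with an `AssertExtensionsEnabled(...)` debug check; the declaration lives in `assembler_riscv64.h` (see the header diff, not reproduced in full here). As a purely hypothetical sketch of what such a variadic check over an enabled-extension set could look like (the stand-in enum, mask, and class are assumptions, not the real API):

    #include <cassert>
    #include <cstdint>

    // Hypothetical stand-ins; the real types in assembler_riscv64.h may differ.
    enum class Riscv64Extension : uint32_t {
      kLoadStore, kM, kA, kF, kD, kZicsr, kZifencei, kZca, kZcd
    };

    class ExtensionChecker {
     public:
      explicit ExtensionChecker(uint32_t enabled_mask) : enabled_mask_(enabled_mask) {}

      bool IsExtensionEnabled(Riscv64Extension ext) const {
        return (enabled_mask_ & (1u << static_cast<uint32_t>(ext))) != 0u;
      }

      // Check one extension, then recurse on the rest of the pack.
      template <typename... OtherExt>
      void AssertExtensionsEnabled(Riscv64Extension ext, OtherExt... other_ext) const {
        assert(IsExtensionEnabled(ext));
        if constexpr (sizeof...(other_ext) != 0u) {
          AssertExtensionsEnabled(other_ext...);
        }
      }

     private:
      uint32_t enabled_mask_;
    };

This shape would let multi-extension emitters such as `FLd` assert `kLoadStore` and `kD` in one call.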
@@ -322,56 +336,69 @@ void Riscv64Assembler::FenceI() { EmitI(0x0, 0x0, 0x1, 0x0, 0xf); }
// RV32M Standard Extension: opcode = 0x33, funct3 from 0x0 ~ 0x7
void Riscv64Assembler::Mul(XRegister rd, XRegister rs1, XRegister rs2) {
+ AssertExtensionsEnabled(Riscv64Extension::kM);
EmitR(0x1, rs2, rs1, 0x0, rd, 0x33);
}
void Riscv64Assembler::Mulh(XRegister rd, XRegister rs1, XRegister rs2) {
+ AssertExtensionsEnabled(Riscv64Extension::kM);
EmitR(0x1, rs2, rs1, 0x1, rd, 0x33);
}
void Riscv64Assembler::Mulhsu(XRegister rd, XRegister rs1, XRegister rs2) {
+ AssertExtensionsEnabled(Riscv64Extension::kM);
EmitR(0x1, rs2, rs1, 0x2, rd, 0x33);
}
void Riscv64Assembler::Mulhu(XRegister rd, XRegister rs1, XRegister rs2) {
+ AssertExtensionsEnabled(Riscv64Extension::kM);
EmitR(0x1, rs2, rs1, 0x3, rd, 0x33);
}
void Riscv64Assembler::Div(XRegister rd, XRegister rs1, XRegister rs2) {
+ AssertExtensionsEnabled(Riscv64Extension::kM);
EmitR(0x1, rs2, rs1, 0x4, rd, 0x33);
}
void Riscv64Assembler::Divu(XRegister rd, XRegister rs1, XRegister rs2) {
+ AssertExtensionsEnabled(Riscv64Extension::kM);
EmitR(0x1, rs2, rs1, 0x5, rd, 0x33);
}
void Riscv64Assembler::Rem(XRegister rd, XRegister rs1, XRegister rs2) {
+ AssertExtensionsEnabled(Riscv64Extension::kM);
EmitR(0x1, rs2, rs1, 0x6, rd, 0x33);
}
void Riscv64Assembler::Remu(XRegister rd, XRegister rs1, XRegister rs2) {
+ AssertExtensionsEnabled(Riscv64Extension::kM);
EmitR(0x1, rs2, rs1, 0x7, rd, 0x33);
}
// RV64M Standard Extension: opcode = 0x3b, funct3 0x0 and from 0x4 ~ 0x7
void Riscv64Assembler::Mulw(XRegister rd, XRegister rs1, XRegister rs2) {
+ AssertExtensionsEnabled(Riscv64Extension::kM);
EmitR(0x1, rs2, rs1, 0x0, rd, 0x3b);
}
void Riscv64Assembler::Divw(XRegister rd, XRegister rs1, XRegister rs2) {
+ AssertExtensionsEnabled(Riscv64Extension::kM);
EmitR(0x1, rs2, rs1, 0x4, rd, 0x3b);
}
void Riscv64Assembler::Divuw(XRegister rd, XRegister rs1, XRegister rs2) {
+ AssertExtensionsEnabled(Riscv64Extension::kM);
EmitR(0x1, rs2, rs1, 0x5, rd, 0x3b);
}
void Riscv64Assembler::Remw(XRegister rd, XRegister rs1, XRegister rs2) {
+ AssertExtensionsEnabled(Riscv64Extension::kM);
EmitR(0x1, rs2, rs1, 0x6, rd, 0x3b);
}
void Riscv64Assembler::Remuw(XRegister rd, XRegister rs1, XRegister rs2) {
+ AssertExtensionsEnabled(Riscv64Extension::kM);
EmitR(0x1, rs2, rs1, 0x7, rd, 0x3b);
}
@@ -380,94 +407,116 @@ void Riscv64Assembler::Remuw(XRegister rd, XRegister rs1, XRegister rs2) {
/////////////////////////////// RV64 "A" Instructions START ///////////////////////////////
void Riscv64Assembler::LrW(XRegister rd, XRegister rs1, AqRl aqrl) {
+ AssertExtensionsEnabled(Riscv64Extension::kA);
CHECK(aqrl != AqRl::kRelease);
EmitR4(0x2, enum_cast<uint32_t>(aqrl), 0x0, rs1, 0x2, rd, 0x2f);
}
void Riscv64Assembler::LrD(XRegister rd, XRegister rs1, AqRl aqrl) {
+ AssertExtensionsEnabled(Riscv64Extension::kA);
CHECK(aqrl != AqRl::kRelease);
EmitR4(0x2, enum_cast<uint32_t>(aqrl), 0x0, rs1, 0x3, rd, 0x2f);
}
void Riscv64Assembler::ScW(XRegister rd, XRegister rs2, XRegister rs1, AqRl aqrl) {
+ AssertExtensionsEnabled(Riscv64Extension::kA);
CHECK(aqrl != AqRl::kAcquire);
EmitR4(0x3, enum_cast<uint32_t>(aqrl), rs2, rs1, 0x2, rd, 0x2f);
}
void Riscv64Assembler::ScD(XRegister rd, XRegister rs2, XRegister rs1, AqRl aqrl) {
+ AssertExtensionsEnabled(Riscv64Extension::kA);
CHECK(aqrl != AqRl::kAcquire);
EmitR4(0x3, enum_cast<uint32_t>(aqrl), rs2, rs1, 0x3, rd, 0x2f);
}
void Riscv64Assembler::AmoSwapW(XRegister rd, XRegister rs2, XRegister rs1, AqRl aqrl) {
+ AssertExtensionsEnabled(Riscv64Extension::kA);
EmitR4(0x1, enum_cast<uint32_t>(aqrl), rs2, rs1, 0x2, rd, 0x2f);
}
void Riscv64Assembler::AmoSwapD(XRegister rd, XRegister rs2, XRegister rs1, AqRl aqrl) {
+ AssertExtensionsEnabled(Riscv64Extension::kA);
EmitR4(0x1, enum_cast<uint32_t>(aqrl), rs2, rs1, 0x3, rd, 0x2f);
}
void Riscv64Assembler::AmoAddW(XRegister rd, XRegister rs2, XRegister rs1, AqRl aqrl) {
+ AssertExtensionsEnabled(Riscv64Extension::kA);
EmitR4(0x0, enum_cast<uint32_t>(aqrl), rs2, rs1, 0x2, rd, 0x2f);
}
void Riscv64Assembler::AmoAddD(XRegister rd, XRegister rs2, XRegister rs1, AqRl aqrl) {
+ AssertExtensionsEnabled(Riscv64Extension::kA);
EmitR4(0x0, enum_cast<uint32_t>(aqrl), rs2, rs1, 0x3, rd, 0x2f);
}
void Riscv64Assembler::AmoXorW(XRegister rd, XRegister rs2, XRegister rs1, AqRl aqrl) {
+ AssertExtensionsEnabled(Riscv64Extension::kA);
EmitR4(0x4, enum_cast<uint32_t>(aqrl), rs2, rs1, 0x2, rd, 0x2f);
}
void Riscv64Assembler::AmoXorD(XRegister rd, XRegister rs2, XRegister rs1, AqRl aqrl) {
+ AssertExtensionsEnabled(Riscv64Extension::kA);
EmitR4(0x4, enum_cast<uint32_t>(aqrl), rs2, rs1, 0x3, rd, 0x2f);
}
void Riscv64Assembler::AmoAndW(XRegister rd, XRegister rs2, XRegister rs1, AqRl aqrl) {
+ AssertExtensionsEnabled(Riscv64Extension::kA);
EmitR4(0xc, enum_cast<uint32_t>(aqrl), rs2, rs1, 0x2, rd, 0x2f);
}
void Riscv64Assembler::AmoAndD(XRegister rd, XRegister rs2, XRegister rs1, AqRl aqrl) {
+ AssertExtensionsEnabled(Riscv64Extension::kA);
EmitR4(0xc, enum_cast<uint32_t>(aqrl), rs2, rs1, 0x3, rd, 0x2f);
}
void Riscv64Assembler::AmoOrW(XRegister rd, XRegister rs2, XRegister rs1, AqRl aqrl) {
+ AssertExtensionsEnabled(Riscv64Extension::kA);
EmitR4(0x8, enum_cast<uint32_t>(aqrl), rs2, rs1, 0x2, rd, 0x2f);
}
void Riscv64Assembler::AmoOrD(XRegister rd, XRegister rs2, XRegister rs1, AqRl aqrl) {
+ AssertExtensionsEnabled(Riscv64Extension::kA);
EmitR4(0x8, enum_cast<uint32_t>(aqrl), rs2, rs1, 0x3, rd, 0x2f);
}
void Riscv64Assembler::AmoMinW(XRegister rd, XRegister rs2, XRegister rs1, AqRl aqrl) {
+ AssertExtensionsEnabled(Riscv64Extension::kA);
EmitR4(0x10, enum_cast<uint32_t>(aqrl), rs2, rs1, 0x2, rd, 0x2f);
}
void Riscv64Assembler::AmoMinD(XRegister rd, XRegister rs2, XRegister rs1, AqRl aqrl) {
+ AssertExtensionsEnabled(Riscv64Extension::kA);
EmitR4(0x10, enum_cast<uint32_t>(aqrl), rs2, rs1, 0x3, rd, 0x2f);
}
void Riscv64Assembler::AmoMaxW(XRegister rd, XRegister rs2, XRegister rs1, AqRl aqrl) {
+ AssertExtensionsEnabled(Riscv64Extension::kA);
EmitR4(0x14, enum_cast<uint32_t>(aqrl), rs2, rs1, 0x2, rd, 0x2f);
}
void Riscv64Assembler::AmoMaxD(XRegister rd, XRegister rs2, XRegister rs1, AqRl aqrl) {
+ AssertExtensionsEnabled(Riscv64Extension::kA);
EmitR4(0x14, enum_cast<uint32_t>(aqrl), rs2, rs1, 0x3, rd, 0x2f);
}
void Riscv64Assembler::AmoMinuW(XRegister rd, XRegister rs2, XRegister rs1, AqRl aqrl) {
+ AssertExtensionsEnabled(Riscv64Extension::kA);
EmitR4(0x18, enum_cast<uint32_t>(aqrl), rs2, rs1, 0x2, rd, 0x2f);
}
void Riscv64Assembler::AmoMinuD(XRegister rd, XRegister rs2, XRegister rs1, AqRl aqrl) {
+ AssertExtensionsEnabled(Riscv64Extension::kA);
EmitR4(0x18, enum_cast<uint32_t>(aqrl), rs2, rs1, 0x3, rd, 0x2f);
}
void Riscv64Assembler::AmoMaxuW(XRegister rd, XRegister rs2, XRegister rs1, AqRl aqrl) {
+ AssertExtensionsEnabled(Riscv64Extension::kA);
EmitR4(0x1c, enum_cast<uint32_t>(aqrl), rs2, rs1, 0x2, rd, 0x2f);
}
void Riscv64Assembler::AmoMaxuD(XRegister rd, XRegister rs2, XRegister rs1, AqRl aqrl) {
+ AssertExtensionsEnabled(Riscv64Extension::kA);
EmitR4(0x1c, enum_cast<uint32_t>(aqrl), rs2, rs1, 0x3, rd, 0x2f);
}
@@ -478,26 +527,32 @@ void Riscv64Assembler::AmoMaxuD(XRegister rd, XRegister rs2, XRegister rs1, AqRl
// "Zicsr" Standard Extension, opcode = 0x73, funct3 from 0x1 ~ 0x3 and 0x5 ~ 0x7
void Riscv64Assembler::Csrrw(XRegister rd, uint32_t csr, XRegister rs1) {
+ AssertExtensionsEnabled(Riscv64Extension::kZicsr);
EmitI(ToInt12(csr), rs1, 0x1, rd, 0x73);
}
void Riscv64Assembler::Csrrs(XRegister rd, uint32_t csr, XRegister rs1) {
+ AssertExtensionsEnabled(Riscv64Extension::kZicsr);
EmitI(ToInt12(csr), rs1, 0x2, rd, 0x73);
}
void Riscv64Assembler::Csrrc(XRegister rd, uint32_t csr, XRegister rs1) {
+ AssertExtensionsEnabled(Riscv64Extension::kZicsr);
EmitI(ToInt12(csr), rs1, 0x3, rd, 0x73);
}
void Riscv64Assembler::Csrrwi(XRegister rd, uint32_t csr, uint32_t uimm5) {
+ AssertExtensionsEnabled(Riscv64Extension::kZicsr);
EmitI(ToInt12(csr), uimm5, 0x5, rd, 0x73);
}
void Riscv64Assembler::Csrrsi(XRegister rd, uint32_t csr, uint32_t uimm5) {
+ AssertExtensionsEnabled(Riscv64Extension::kZicsr);
EmitI(ToInt12(csr), uimm5, 0x6, rd, 0x73);
}
void Riscv64Assembler::Csrrci(XRegister rd, uint32_t csr, uint32_t uimm5) {
+ AssertExtensionsEnabled(Riscv64Extension::kZicsr);
EmitI(ToInt12(csr), uimm5, 0x7, rd, 0x73);
}
@@ -508,18 +563,22 @@ void Riscv64Assembler::Csrrci(XRegister rd, uint32_t csr, uint32_t uimm5) {
// FP load/store instructions (RV32F+RV32D): opcode = 0x07, 0x27
void Riscv64Assembler::FLw(FRegister rd, XRegister rs1, int32_t offset) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kF);
EmitI(offset, rs1, 0x2, rd, 0x07);
}
void Riscv64Assembler::FLd(FRegister rd, XRegister rs1, int32_t offset) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kD);
EmitI(offset, rs1, 0x3, rd, 0x07);
}
void Riscv64Assembler::FSw(FRegister rs2, XRegister rs1, int32_t offset) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kF);
EmitS(offset, rs2, rs1, 0x2, 0x27);
}
void Riscv64Assembler::FSd(FRegister rs2, XRegister rs1, int32_t offset) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kD);
EmitS(offset, rs2, rs1, 0x3, 0x27);
}
@@ -527,131 +586,161 @@ void Riscv64Assembler::FSd(FRegister rs2, XRegister rs1, int32_t offset) {
void Riscv64Assembler::FMAddS(
FRegister rd, FRegister rs1, FRegister rs2, FRegister rs3, FPRoundingMode frm) {
+ AssertExtensionsEnabled(Riscv64Extension::kF);
EmitR4(rs3, 0x0, rs2, rs1, enum_cast<uint32_t>(frm), rd, 0x43);
}
void Riscv64Assembler::FMAddD(
FRegister rd, FRegister rs1, FRegister rs2, FRegister rs3, FPRoundingMode frm) {
+ AssertExtensionsEnabled(Riscv64Extension::kD);
EmitR4(rs3, 0x1, rs2, rs1, enum_cast<uint32_t>(frm), rd, 0x43);
}
void Riscv64Assembler::FMSubS(
FRegister rd, FRegister rs1, FRegister rs2, FRegister rs3, FPRoundingMode frm) {
+ AssertExtensionsEnabled(Riscv64Extension::kF);
EmitR4(rs3, 0x0, rs2, rs1, enum_cast<uint32_t>(frm), rd, 0x47);
}
void Riscv64Assembler::FMSubD(
FRegister rd, FRegister rs1, FRegister rs2, FRegister rs3, FPRoundingMode frm) {
+ AssertExtensionsEnabled(Riscv64Extension::kD);
EmitR4(rs3, 0x1, rs2, rs1, enum_cast<uint32_t>(frm), rd, 0x47);
}
void Riscv64Assembler::FNMSubS(
FRegister rd, FRegister rs1, FRegister rs2, FRegister rs3, FPRoundingMode frm) {
+ AssertExtensionsEnabled(Riscv64Extension::kF);
EmitR4(rs3, 0x0, rs2, rs1, enum_cast<uint32_t>(frm), rd, 0x4b);
}
void Riscv64Assembler::FNMSubD(
FRegister rd, FRegister rs1, FRegister rs2, FRegister rs3, FPRoundingMode frm) {
+ AssertExtensionsEnabled(Riscv64Extension::kD);
EmitR4(rs3, 0x1, rs2, rs1, enum_cast<uint32_t>(frm), rd, 0x4b);
}
void Riscv64Assembler::FNMAddS(
FRegister rd, FRegister rs1, FRegister rs2, FRegister rs3, FPRoundingMode frm) {
+ AssertExtensionsEnabled(Riscv64Extension::kF);
EmitR4(rs3, 0x0, rs2, rs1, enum_cast<uint32_t>(frm), rd, 0x4f);
}
void Riscv64Assembler::FNMAddD(
FRegister rd, FRegister rs1, FRegister rs2, FRegister rs3, FPRoundingMode frm) {
+ AssertExtensionsEnabled(Riscv64Extension::kD);
EmitR4(rs3, 0x1, rs2, rs1, enum_cast<uint32_t>(frm), rd, 0x4f);
}
// Simple FP instructions (RV32F+RV32D): opcode = 0x53, funct7 = 0b0XXXX0D
void Riscv64Assembler::FAddS(FRegister rd, FRegister rs1, FRegister rs2, FPRoundingMode frm) {
+ AssertExtensionsEnabled(Riscv64Extension::kF);
EmitR(0x0, rs2, rs1, enum_cast<uint32_t>(frm), rd, 0x53);
}
void Riscv64Assembler::FAddD(FRegister rd, FRegister rs1, FRegister rs2, FPRoundingMode frm) {
+ AssertExtensionsEnabled(Riscv64Extension::kD);
EmitR(0x1, rs2, rs1, enum_cast<uint32_t>(frm), rd, 0x53);
}
void Riscv64Assembler::FSubS(FRegister rd, FRegister rs1, FRegister rs2, FPRoundingMode frm) {
+ AssertExtensionsEnabled(Riscv64Extension::kF);
EmitR(0x4, rs2, rs1, enum_cast<uint32_t>(frm), rd, 0x53);
}
void Riscv64Assembler::FSubD(FRegister rd, FRegister rs1, FRegister rs2, FPRoundingMode frm) {
+ AssertExtensionsEnabled(Riscv64Extension::kD);
EmitR(0x5, rs2, rs1, enum_cast<uint32_t>(frm), rd, 0x53);
}
void Riscv64Assembler::FMulS(FRegister rd, FRegister rs1, FRegister rs2, FPRoundingMode frm) {
+ AssertExtensionsEnabled(Riscv64Extension::kF);
EmitR(0x8, rs2, rs1, enum_cast<uint32_t>(frm), rd, 0x53);
}
void Riscv64Assembler::FMulD(FRegister rd, FRegister rs1, FRegister rs2, FPRoundingMode frm) {
+ AssertExtensionsEnabled(Riscv64Extension::kD);
EmitR(0x9, rs2, rs1, enum_cast<uint32_t>(frm), rd, 0x53);
}
void Riscv64Assembler::FDivS(FRegister rd, FRegister rs1, FRegister rs2, FPRoundingMode frm) {
+ AssertExtensionsEnabled(Riscv64Extension::kF);
EmitR(0xc, rs2, rs1, enum_cast<uint32_t>(frm), rd, 0x53);
}
void Riscv64Assembler::FDivD(FRegister rd, FRegister rs1, FRegister rs2, FPRoundingMode frm) {
+ AssertExtensionsEnabled(Riscv64Extension::kD);
EmitR(0xd, rs2, rs1, enum_cast<uint32_t>(frm), rd, 0x53);
}
void Riscv64Assembler::FSqrtS(FRegister rd, FRegister rs1, FPRoundingMode frm) {
+ AssertExtensionsEnabled(Riscv64Extension::kF);
EmitR(0x2c, 0x0, rs1, enum_cast<uint32_t>(frm), rd, 0x53);
}
void Riscv64Assembler::FSqrtD(FRegister rd, FRegister rs1, FPRoundingMode frm) {
+ AssertExtensionsEnabled(Riscv64Extension::kD);
EmitR(0x2d, 0x0, rs1, enum_cast<uint32_t>(frm), rd, 0x53);
}
void Riscv64Assembler::FSgnjS(FRegister rd, FRegister rs1, FRegister rs2) {
+ AssertExtensionsEnabled(Riscv64Extension::kF);
EmitR(0x10, rs2, rs1, 0x0, rd, 0x53);
}
void Riscv64Assembler::FSgnjD(FRegister rd, FRegister rs1, FRegister rs2) {
+ AssertExtensionsEnabled(Riscv64Extension::kD);
EmitR(0x11, rs2, rs1, 0x0, rd, 0x53);
}
void Riscv64Assembler::FSgnjnS(FRegister rd, FRegister rs1, FRegister rs2) {
+ AssertExtensionsEnabled(Riscv64Extension::kF);
EmitR(0x10, rs2, rs1, 0x1, rd, 0x53);
}
void Riscv64Assembler::FSgnjnD(FRegister rd, FRegister rs1, FRegister rs2) {
+ AssertExtensionsEnabled(Riscv64Extension::kD);
EmitR(0x11, rs2, rs1, 0x1, rd, 0x53);
}
void Riscv64Assembler::FSgnjxS(FRegister rd, FRegister rs1, FRegister rs2) {
+ AssertExtensionsEnabled(Riscv64Extension::kF);
EmitR(0x10, rs2, rs1, 0x2, rd, 0x53);
}
void Riscv64Assembler::FSgnjxD(FRegister rd, FRegister rs1, FRegister rs2) {
+ AssertExtensionsEnabled(Riscv64Extension::kD);
EmitR(0x11, rs2, rs1, 0x2, rd, 0x53);
}
void Riscv64Assembler::FMinS(FRegister rd, FRegister rs1, FRegister rs2) {
+ AssertExtensionsEnabled(Riscv64Extension::kF);
EmitR(0x14, rs2, rs1, 0x0, rd, 0x53);
}
void Riscv64Assembler::FMinD(FRegister rd, FRegister rs1, FRegister rs2) {
+ AssertExtensionsEnabled(Riscv64Extension::kD);
EmitR(0x15, rs2, rs1, 0x0, rd, 0x53);
}
void Riscv64Assembler::FMaxS(FRegister rd, FRegister rs1, FRegister rs2) {
+ AssertExtensionsEnabled(Riscv64Extension::kF);
EmitR(0x14, rs2, rs1, 0x1, rd, 0x53);
}
void Riscv64Assembler::FMaxD(FRegister rd, FRegister rs1, FRegister rs2) {
+ AssertExtensionsEnabled(Riscv64Extension::kD);
EmitR(0x15, rs2, rs1, 0x1, rd, 0x53);
}
void Riscv64Assembler::FCvtSD(FRegister rd, FRegister rs1, FPRoundingMode frm) {
+ AssertExtensionsEnabled(Riscv64Extension::kF, Riscv64Extension::kD);
EmitR(0x20, 0x1, rs1, enum_cast<uint32_t>(frm), rd, 0x53);
}
void Riscv64Assembler::FCvtDS(FRegister rd, FRegister rs1, FPRoundingMode frm) {
+ AssertExtensionsEnabled(Riscv64Extension::kF, Riscv64Extension::kD);
// Note: The `frm` is useless; the result can represent every value of the source exactly.
EmitR(0x21, 0x0, rs1, enum_cast<uint32_t>(frm), rd, 0x53);
}
@@ -659,122 +748,150 @@ void Riscv64Assembler::FCvtDS(FRegister rd, FRegister rs1, FPRoundingMode frm) {
// FP compare instructions (RV32F+RV32D): opcode = 0x53, funct7 = 0b101000D
void Riscv64Assembler::FEqS(XRegister rd, FRegister rs1, FRegister rs2) {
+ AssertExtensionsEnabled(Riscv64Extension::kF);
EmitR(0x50, rs2, rs1, 0x2, rd, 0x53);
}
void Riscv64Assembler::FEqD(XRegister rd, FRegister rs1, FRegister rs2) {
+ AssertExtensionsEnabled(Riscv64Extension::kD);
EmitR(0x51, rs2, rs1, 0x2, rd, 0x53);
}
void Riscv64Assembler::FLtS(XRegister rd, FRegister rs1, FRegister rs2) {
+ AssertExtensionsEnabled(Riscv64Extension::kF);
EmitR(0x50, rs2, rs1, 0x1, rd, 0x53);
}
void Riscv64Assembler::FLtD(XRegister rd, FRegister rs1, FRegister rs2) {
+ AssertExtensionsEnabled(Riscv64Extension::kD);
EmitR(0x51, rs2, rs1, 0x1, rd, 0x53);
}
void Riscv64Assembler::FLeS(XRegister rd, FRegister rs1, FRegister rs2) {
+ AssertExtensionsEnabled(Riscv64Extension::kF);
EmitR(0x50, rs2, rs1, 0x0, rd, 0x53);
}
void Riscv64Assembler::FLeD(XRegister rd, FRegister rs1, FRegister rs2) {
+ AssertExtensionsEnabled(Riscv64Extension::kD);
EmitR(0x51, rs2, rs1, 0x0, rd, 0x53);
}
// FP conversion instructions (RV32F+RV32D+RV64F+RV64D): opcode = 0x53, funct7 = 0b110X00D
void Riscv64Assembler::FCvtWS(XRegister rd, FRegister rs1, FPRoundingMode frm) {
+ AssertExtensionsEnabled(Riscv64Extension::kF);
EmitR(0x60, 0x0, rs1, enum_cast<uint32_t>(frm), rd, 0x53);
}
void Riscv64Assembler::FCvtWD(XRegister rd, FRegister rs1, FPRoundingMode frm) {
+ AssertExtensionsEnabled(Riscv64Extension::kD);
EmitR(0x61, 0x0, rs1, enum_cast<uint32_t>(frm), rd, 0x53);
}
void Riscv64Assembler::FCvtWuS(XRegister rd, FRegister rs1, FPRoundingMode frm) {
+ AssertExtensionsEnabled(Riscv64Extension::kF);
EmitR(0x60, 0x1, rs1, enum_cast<uint32_t>(frm), rd, 0x53);
}
void Riscv64Assembler::FCvtWuD(XRegister rd, FRegister rs1, FPRoundingMode frm) {
+ AssertExtensionsEnabled(Riscv64Extension::kD);
EmitR(0x61, 0x1, rs1, enum_cast<uint32_t>(frm), rd, 0x53);
}
void Riscv64Assembler::FCvtLS(XRegister rd, FRegister rs1, FPRoundingMode frm) {
+ AssertExtensionsEnabled(Riscv64Extension::kF);
EmitR(0x60, 0x2, rs1, enum_cast<uint32_t>(frm), rd, 0x53);
}
void Riscv64Assembler::FCvtLD(XRegister rd, FRegister rs1, FPRoundingMode frm) {
+ AssertExtensionsEnabled(Riscv64Extension::kD);
EmitR(0x61, 0x2, rs1, enum_cast<uint32_t>(frm), rd, 0x53);
}
void Riscv64Assembler::FCvtLuS(XRegister rd, FRegister rs1, FPRoundingMode frm) {
+ AssertExtensionsEnabled(Riscv64Extension::kF);
EmitR(0x60, 0x3, rs1, enum_cast<uint32_t>(frm), rd, 0x53);
}
void Riscv64Assembler::FCvtLuD(XRegister rd, FRegister rs1, FPRoundingMode frm) {
+ AssertExtensionsEnabled(Riscv64Extension::kD);
EmitR(0x61, 0x3, rs1, enum_cast<uint32_t>(frm), rd, 0x53);
}
void Riscv64Assembler::FCvtSW(FRegister rd, XRegister rs1, FPRoundingMode frm) {
+ AssertExtensionsEnabled(Riscv64Extension::kF);
EmitR(0x68, 0x0, rs1, enum_cast<uint32_t>(frm), rd, 0x53);
}
void Riscv64Assembler::FCvtDW(FRegister rd, XRegister rs1, FPRoundingMode frm) {
+ AssertExtensionsEnabled(Riscv64Extension::kD);
// Note: The `frm` is useless; the result can represent every value of the source exactly.
EmitR(0x69, 0x0, rs1, enum_cast<uint32_t>(frm), rd, 0x53);
}
void Riscv64Assembler::FCvtSWu(FRegister rd, XRegister rs1, FPRoundingMode frm) {
+ AssertExtensionsEnabled(Riscv64Extension::kF);
EmitR(0x68, 0x1, rs1, enum_cast<uint32_t>(frm), rd, 0x53);
}
void Riscv64Assembler::FCvtDWu(FRegister rd, XRegister rs1, FPRoundingMode frm) {
+ AssertExtensionsEnabled(Riscv64Extension::kD);
// Note: The `frm` is useless; the result can represent every value of the source exactly.
EmitR(0x69, 0x1, rs1, enum_cast<uint32_t>(frm), rd, 0x53);
}
void Riscv64Assembler::FCvtSL(FRegister rd, XRegister rs1, FPRoundingMode frm) {
+ AssertExtensionsEnabled(Riscv64Extension::kF);
EmitR(0x68, 0x2, rs1, enum_cast<uint32_t>(frm), rd, 0x53);
}
void Riscv64Assembler::FCvtDL(FRegister rd, XRegister rs1, FPRoundingMode frm) {
+ AssertExtensionsEnabled(Riscv64Extension::kD);
EmitR(0x69, 0x2, rs1, enum_cast<uint32_t>(frm), rd, 0x53);
}
void Riscv64Assembler::FCvtSLu(FRegister rd, XRegister rs1, FPRoundingMode frm) {
+ AssertExtensionsEnabled(Riscv64Extension::kF);
EmitR(0x68, 0x3, rs1, enum_cast<uint32_t>(frm), rd, 0x53);
}
void Riscv64Assembler::FCvtDLu(FRegister rd, XRegister rs1, FPRoundingMode frm) {
+ AssertExtensionsEnabled(Riscv64Extension::kD);
EmitR(0x69, 0x3, rs1, enum_cast<uint32_t>(frm), rd, 0x53);
}
// FP move instructions (RV32F+RV32D): opcode = 0x53, funct3 = 0x0, funct7 = 0b111X00D
void Riscv64Assembler::FMvXW(XRegister rd, FRegister rs1) {
+ AssertExtensionsEnabled(Riscv64Extension::kF);
EmitR(0x70, 0x0, rs1, 0x0, rd, 0x53);
}
void Riscv64Assembler::FMvXD(XRegister rd, FRegister rs1) {
+ AssertExtensionsEnabled(Riscv64Extension::kD);
EmitR(0x71, 0x0, rs1, 0x0, rd, 0x53);
}
void Riscv64Assembler::FMvWX(FRegister rd, XRegister rs1) {
+ AssertExtensionsEnabled(Riscv64Extension::kF);
EmitR(0x78, 0x0, rs1, 0x0, rd, 0x53);
}
void Riscv64Assembler::FMvDX(FRegister rd, XRegister rs1) {
+ AssertExtensionsEnabled(Riscv64Extension::kD);
EmitR(0x79, 0x0, rs1, 0x0, rd, 0x53);
}
// FP classify instructions (RV32F+RV32D): opcode = 0x53, funct3 = 0x1, funct7 = 0b111X00D
void Riscv64Assembler::FClassS(XRegister rd, FRegister rs1) {
+ AssertExtensionsEnabled(Riscv64Extension::kF);
EmitR(0x70, 0x0, rs1, 0x1, rd, 0x53);
}
void Riscv64Assembler::FClassD(XRegister rd, FRegister rs1) {
+ AssertExtensionsEnabled(Riscv64Extension::kD);
EmitR(0x71, 0x0, rs1, 0x1, rd, 0x53);
}
@@ -783,86 +900,101 @@ void Riscv64Assembler::FClassD(XRegister rd, FRegister rs1) {
/////////////////////////////// RV64 "C" Instructions START /////////////////////////////
void Riscv64Assembler::CLwsp(XRegister rd, int32_t offset) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kZca);
DCHECK_NE(rd, Zero);
-
EmitCI(0b010u, rd, ExtractOffset52_76(offset), 0b10u);
}
void Riscv64Assembler::CLdsp(XRegister rd, int32_t offset) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kZca);
DCHECK_NE(rd, Zero);
-
EmitCI(0b011u, rd, ExtractOffset53_86(offset), 0b10u);
}
void Riscv64Assembler::CFLdsp(FRegister rd, int32_t offset) {
+ AssertExtensionsEnabled(
+ Riscv64Extension::kLoadStore, Riscv64Extension::kZcd, Riscv64Extension::kD);
EmitCI(0b001u, rd, ExtractOffset53_86(offset), 0b10u);
}
void Riscv64Assembler::CSwsp(XRegister rs2, int32_t offset) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kZca);
EmitCSS(0b110u, ExtractOffset52_76(offset), rs2, 0b10u);
}
void Riscv64Assembler::CSdsp(XRegister rs2, int32_t offset) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kZca);
EmitCSS(0b111u, ExtractOffset53_86(offset), rs2, 0b10u);
}
void Riscv64Assembler::CFSdsp(FRegister rs2, int32_t offset) {
+ AssertExtensionsEnabled(
+ Riscv64Extension::kLoadStore, Riscv64Extension::kZcd, Riscv64Extension::kD);
EmitCSS(0b101u, ExtractOffset53_86(offset), rs2, 0b10u);
}
void Riscv64Assembler::CLw(XRegister rd_s, XRegister rs1_s, int32_t offset) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kZca);
EmitCM(0b010u, ExtractOffset52_6(offset), rs1_s, rd_s, 0b00u);
}
void Riscv64Assembler::CLd(XRegister rd_s, XRegister rs1_s, int32_t offset) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kZca);
EmitCM(0b011u, ExtractOffset53_76(offset), rs1_s, rd_s, 0b00u);
}
void Riscv64Assembler::CFLd(FRegister rd_s, XRegister rs1_s, int32_t offset) {
+ AssertExtensionsEnabled(
+ Riscv64Extension::kLoadStore, Riscv64Extension::kZcd, Riscv64Extension::kD);
EmitCM(0b001u, ExtractOffset53_76(offset), rs1_s, rd_s, 0b00u);
}
void Riscv64Assembler::CSw(XRegister rs2_s, XRegister rs1_s, int32_t offset) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kZca);
EmitCM(0b110u, ExtractOffset52_6(offset), rs1_s, rs2_s, 0b00u);
}
void Riscv64Assembler::CSd(XRegister rs2_s, XRegister rs1_s, int32_t offset) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kZca);
EmitCM(0b111u, ExtractOffset53_76(offset), rs1_s, rs2_s, 0b00u);
}
void Riscv64Assembler::CFSd(FRegister rs2_s, XRegister rs1_s, int32_t offset) {
+ AssertExtensionsEnabled(
+ Riscv64Extension::kLoadStore, Riscv64Extension::kZcd, Riscv64Extension::kD);
EmitCM(0b101u, ExtractOffset53_76(offset), rs1_s, rs2_s, 0b00u);
}
void Riscv64Assembler::CLi(XRegister rd, int32_t imm) {
+ AssertExtensionsEnabled(Riscv64Extension::kZca);
DCHECK_NE(rd, Zero);
DCHECK(IsInt<6>(imm));
-
EmitCI(0b010u, rd, EncodeInt6(imm), 0b01u);
}
void Riscv64Assembler::CLui(XRegister rd, uint32_t nzimm6) {
+ AssertExtensionsEnabled(Riscv64Extension::kZca);
DCHECK_NE(rd, Zero);
DCHECK_NE(rd, SP);
DCHECK(IsImmCLuiEncodable(nzimm6));
-
EmitCI(0b011u, rd, nzimm6 & MaskLeastSignificant<uint32_t>(6), 0b01u);
}
void Riscv64Assembler::CAddi(XRegister rd, int32_t nzimm) {
+ AssertExtensionsEnabled(Riscv64Extension::kZca);
DCHECK_NE(rd, Zero);
DCHECK_NE(nzimm, 0);
-
EmitCI(0b000u, rd, EncodeInt6(nzimm), 0b01u);
}
void Riscv64Assembler::CAddiw(XRegister rd, int32_t imm) {
+ AssertExtensionsEnabled(Riscv64Extension::kZca);
DCHECK_NE(rd, Zero);
-
EmitCI(0b001u, rd, EncodeInt6(imm), 0b01u);
}
void Riscv64Assembler::CAddi16Sp(int32_t nzimm) {
+ AssertExtensionsEnabled(Riscv64Extension::kZca);
DCHECK_NE(nzimm, 0);
DCHECK(IsAligned<16>(nzimm));
@@ -880,6 +1012,7 @@ void Riscv64Assembler::CAddi16Sp(int32_t nzimm) {
}
void Riscv64Assembler::CAddi4Spn(XRegister rd_s, uint32_t nzuimm) {
+ AssertExtensionsEnabled(Riscv64Extension::kZca);
DCHECK_NE(nzuimm, 0u);
DCHECK(IsAligned<4>(nzuimm));
DCHECK(IsUint<10>(nzuimm));
@@ -894,187 +1027,227 @@ void Riscv64Assembler::CAddi4Spn(XRegister rd_s, uint32_t nzuimm) {
}
void Riscv64Assembler::CSlli(XRegister rd, int32_t shamt) {
+ AssertExtensionsEnabled(Riscv64Extension::kZca);
DCHECK_NE(shamt, 0);
DCHECK_NE(rd, Zero);
-
EmitCI(0b000u, rd, shamt, 0b10u);
}
void Riscv64Assembler::CSrli(XRegister rd_s, int32_t shamt) {
+ AssertExtensionsEnabled(Riscv64Extension::kZca);
DCHECK_NE(shamt, 0);
DCHECK(IsUint<6>(shamt));
-
EmitCBArithmetic(0b100u, 0b00u, shamt, rd_s, 0b01u);
}
void Riscv64Assembler::CSrai(XRegister rd_s, int32_t shamt) {
+ AssertExtensionsEnabled(Riscv64Extension::kZca);
DCHECK_NE(shamt, 0);
DCHECK(IsUint<6>(shamt));
-
EmitCBArithmetic(0b100u, 0b01u, shamt, rd_s, 0b01u);
}
void Riscv64Assembler::CAndi(XRegister rd_s, int32_t imm) {
+ AssertExtensionsEnabled(Riscv64Extension::kZca);
DCHECK(IsInt<6>(imm));
-
EmitCBArithmetic(0b100u, 0b10u, imm, rd_s, 0b01u);
}
void Riscv64Assembler::CMv(XRegister rd, XRegister rs2) {
+ AssertExtensionsEnabled(Riscv64Extension::kZca);
DCHECK_NE(rd, Zero);
DCHECK_NE(rs2, Zero);
-
EmitCR(0b1000u, rd, rs2, 0b10u);
}
void Riscv64Assembler::CAdd(XRegister rd, XRegister rs2) {
+ AssertExtensionsEnabled(Riscv64Extension::kZca);
DCHECK_NE(rd, Zero);
DCHECK_NE(rs2, Zero);
-
EmitCR(0b1001u, rd, rs2, 0b10u);
}
void Riscv64Assembler::CAnd(XRegister rd_s, XRegister rs2_s) {
+ AssertExtensionsEnabled(Riscv64Extension::kZca);
EmitCAReg(0b100011u, rd_s, 0b11u, rs2_s, 0b01u);
}
void Riscv64Assembler::COr(XRegister rd_s, XRegister rs2_s) {
+ AssertExtensionsEnabled(Riscv64Extension::kZca);
EmitCAReg(0b100011u, rd_s, 0b10u, rs2_s, 0b01u);
}
void Riscv64Assembler::CXor(XRegister rd_s, XRegister rs2_s) {
+ AssertExtensionsEnabled(Riscv64Extension::kZca);
EmitCAReg(0b100011u, rd_s, 0b01u, rs2_s, 0b01u);
}
void Riscv64Assembler::CSub(XRegister rd_s, XRegister rs2_s) {
+ AssertExtensionsEnabled(Riscv64Extension::kZca);
EmitCAReg(0b100011u, rd_s, 0b00u, rs2_s, 0b01u);
}
void Riscv64Assembler::CAddw(XRegister rd_s, XRegister rs2_s) {
+ AssertExtensionsEnabled(Riscv64Extension::kZca);
EmitCAReg(0b100111u, rd_s, 0b01u, rs2_s, 0b01u);
}
void Riscv64Assembler::CSubw(XRegister rd_s, XRegister rs2_s) {
+ AssertExtensionsEnabled(Riscv64Extension::kZca);
EmitCAReg(0b100111u, rd_s, 0b00u, rs2_s, 0b01u);
}
// "Zcb" Standard Extension, part of "C", opcode = 0b00, 0b01, funct3 = 0b100.
void Riscv64Assembler::CLbu(XRegister rd_s, XRegister rs1_s, int32_t offset) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kZcb);
EmitCAReg(0b100000u, rs1_s, EncodeOffset0_1(offset), rd_s, 0b00u);
}
void Riscv64Assembler::CLhu(XRegister rd_s, XRegister rs1_s, int32_t offset) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kZcb);
DCHECK(IsUint<2>(offset));
DCHECK_ALIGNED(offset, 2);
EmitCAReg(0b100001u, rs1_s, BitFieldExtract<uint32_t>(offset, 1, 1), rd_s, 0b00u);
}
void Riscv64Assembler::CLh(XRegister rd_s, XRegister rs1_s, int32_t offset) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kZcb);
DCHECK(IsUint<2>(offset));
DCHECK_ALIGNED(offset, 2);
EmitCAReg(0b100001u, rs1_s, 0b10 | BitFieldExtract<uint32_t>(offset, 1, 1), rd_s, 0b00u);
}
void Riscv64Assembler::CSb(XRegister rs2_s, XRegister rs1_s, int32_t offset) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kZcb);
EmitCAReg(0b100010u, rs1_s, EncodeOffset0_1(offset), rs2_s, 0b00u);
}
void Riscv64Assembler::CSh(XRegister rs2_s, XRegister rs1_s, int32_t offset) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kZcb);
DCHECK(IsUint<2>(offset));
DCHECK_ALIGNED(offset, 2);
EmitCAReg(0b100011u, rs1_s, BitFieldExtract<uint32_t>(offset, 1, 1), rs2_s, 0b00u);
}
-void Riscv64Assembler::CZext_b(XRegister rd_rs1_s) {
+void Riscv64Assembler::CZextB(XRegister rd_rs1_s) {
+ AssertExtensionsEnabled(Riscv64Extension::kZcb);
EmitCAImm(0b100111u, rd_rs1_s, 0b11u, 0b000u, 0b01u);
}
-void Riscv64Assembler::CSext_b(XRegister rd_rs1_s) {
+void Riscv64Assembler::CSextB(XRegister rd_rs1_s) {
+ AssertExtensionsEnabled(Riscv64Extension::kZbb, Riscv64Extension::kZcb);
EmitCAImm(0b100111u, rd_rs1_s, 0b11u, 0b001u, 0b01u);
}
-void Riscv64Assembler::CZext_h(XRegister rd_rs1_s) {
+void Riscv64Assembler::CZextH(XRegister rd_rs1_s) {
+ AssertExtensionsEnabled(Riscv64Extension::kZbb, Riscv64Extension::kZcb);
EmitCAImm(0b100111u, rd_rs1_s, 0b11u, 0b010u, 0b01u);
}
-void Riscv64Assembler::CSext_h(XRegister rd_rs1_s) {
+void Riscv64Assembler::CSextH(XRegister rd_rs1_s) {
+ AssertExtensionsEnabled(Riscv64Extension::kZbb, Riscv64Extension::kZcb);
EmitCAImm(0b100111u, rd_rs1_s, 0b11u, 0b011u, 0b01u);
}
-void Riscv64Assembler::CZext_w(XRegister rd_rs1_s) {
+void Riscv64Assembler::CZextW(XRegister rd_rs1_s) {
+ AssertExtensionsEnabled(Riscv64Extension::kZba, Riscv64Extension::kZcb);
EmitCAImm(0b100111u, rd_rs1_s, 0b11u, 0b100u, 0b01u);
}
void Riscv64Assembler::CNot(XRegister rd_rs1_s) {
+ AssertExtensionsEnabled(Riscv64Extension::kZcb);
EmitCAImm(0b100111u, rd_rs1_s, 0b11u, 0b101u, 0b01u);
}
void Riscv64Assembler::CMul(XRegister rd_s, XRegister rs2_s) {
+ AssertExtensionsEnabled(Riscv64Extension::kM, Riscv64Extension::kZcb);
EmitCAReg(0b100111u, rd_s, 0b10u, rs2_s, 0b01u);
}
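The extension pairs asserted for the Zcb instructions above differ per instruction because each compressed form is defined as expanding to an instruction from another extension; a summary per the Zcb spec:

// Expansions per the Zcb spec, explaining the asserted extension pairs:
//   c.zext.b -> andi rd, rd, 0xff     (Zcb alone)
//   c.sext.b -> sext.b rd, rd         (also needs Zbb)
//   c.zext.h -> zext.h rd, rd         (also needs Zbb)
//   c.sext.h -> sext.h rd, rd         (also needs Zbb)
//   c.zext.w -> add.uw rd, rd, zero   (also needs Zba)
//   c.not    -> xori rd, rd, -1       (Zcb alone)
//   c.mul    -> mul rd, rd, rs2       (also needs M)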
-void Riscv64Assembler::CJ(int32_t offset) { EmitCJ(0b101u, offset, 0b01u); }
+void Riscv64Assembler::CJ(int32_t offset) {
+ AssertExtensionsEnabled(Riscv64Extension::kZca);
+ EmitCJ(0b101u, offset, 0b01u);
+}
void Riscv64Assembler::CJr(XRegister rs1) {
+ AssertExtensionsEnabled(Riscv64Extension::kZca);
DCHECK_NE(rs1, Zero);
-
EmitCR(0b1000u, rs1, Zero, 0b10u);
}
void Riscv64Assembler::CJalr(XRegister rs1) {
+ AssertExtensionsEnabled(Riscv64Extension::kZca);
DCHECK_NE(rs1, Zero);
-
EmitCR(0b1001u, rs1, Zero, 0b10u);
}
void Riscv64Assembler::CBeqz(XRegister rs1_s, int32_t offset) {
+ AssertExtensionsEnabled(Riscv64Extension::kZca);
EmitCBBranch(0b110u, offset, rs1_s, 0b01u);
}
void Riscv64Assembler::CBnez(XRegister rs1_s, int32_t offset) {
+ AssertExtensionsEnabled(Riscv64Extension::kZca);
EmitCBBranch(0b111u, offset, rs1_s, 0b01u);
}
-void Riscv64Assembler::CEbreak() { EmitCR(0b1001u, Zero, Zero, 0b10u); }
+void Riscv64Assembler::CEbreak() {
+ AssertExtensionsEnabled(Riscv64Extension::kZca);
+ EmitCR(0b1001u, Zero, Zero, 0b10u);
+}
-void Riscv64Assembler::CNop() { EmitCI(0b000u, Zero, 0u, 0b01u); }
+void Riscv64Assembler::CNop() {
+ AssertExtensionsEnabled(Riscv64Extension::kZca);
+ EmitCI(0b000u, Zero, 0u, 0b01u);
+}
-void Riscv64Assembler::CUnimp() { Emit16(0x0u); }
+void Riscv64Assembler::CUnimp() {
+ AssertExtensionsEnabled(Riscv64Extension::kZca);
+ Emit16(0x0u);
+}
/////////////////////////////// RV64 "C" Instructions END ///////////////////////////////
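How the new assertions are expected to trip in practice — a hypothetical test-style sketch (the constructor argument and the extension-mask spelling are assumptions for illustration, not taken from this diff):

// Hypothetical: an assembler configured with only Zca enabled.
Riscv64Assembler assembler(&allocator, /*enabled_extensions=*/ kZcaOnlyMask);
assembler.CLi(A0, 1);    // OK: CLi asserts only Zca.
assembler.CMul(A0, A1);  // Debug abort: CMul also asserts M.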
////////////////////////////// RV64 "Zba" Instructions START /////////////////////////////
void Riscv64Assembler::AddUw(XRegister rd, XRegister rs1, XRegister rs2) {
+ AssertExtensionsEnabled(Riscv64Extension::kZba);
EmitR(0x4, rs2, rs1, 0x0, rd, 0x3b);
}
void Riscv64Assembler::Sh1Add(XRegister rd, XRegister rs1, XRegister rs2) {
+ AssertExtensionsEnabled(Riscv64Extension::kZba);
EmitR(0x10, rs2, rs1, 0x2, rd, 0x33);
}
void Riscv64Assembler::Sh1AddUw(XRegister rd, XRegister rs1, XRegister rs2) {
+ AssertExtensionsEnabled(Riscv64Extension::kZba);
EmitR(0x10, rs2, rs1, 0x2, rd, 0x3b);
}
void Riscv64Assembler::Sh2Add(XRegister rd, XRegister rs1, XRegister rs2) {
+ AssertExtensionsEnabled(Riscv64Extension::kZba);
EmitR(0x10, rs2, rs1, 0x4, rd, 0x33);
}
void Riscv64Assembler::Sh2AddUw(XRegister rd, XRegister rs1, XRegister rs2) {
+ AssertExtensionsEnabled(Riscv64Extension::kZba);
EmitR(0x10, rs2, rs1, 0x4, rd, 0x3b);
}
void Riscv64Assembler::Sh3Add(XRegister rd, XRegister rs1, XRegister rs2) {
+ AssertExtensionsEnabled(Riscv64Extension::kZba);
EmitR(0x10, rs2, rs1, 0x6, rd, 0x33);
}
void Riscv64Assembler::Sh3AddUw(XRegister rd, XRegister rs1, XRegister rs2) {
+ AssertExtensionsEnabled(Riscv64Extension::kZba);
EmitR(0x10, rs2, rs1, 0x6, rd, 0x3b);
}
void Riscv64Assembler::SlliUw(XRegister rd, XRegister rs1, int32_t shamt) {
+ AssertExtensionsEnabled(Riscv64Extension::kZba);
EmitI6(0x2, shamt, rs1, 0x1, rd, 0x1b);
}
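For reference, the address-generation semantics behind the Zba emitters above, per the Zba spec:

//   AddUw(rd, rs1, rs2):     rd = ZeroExtend32(rs1) + rs2
//   ShNAdd(rd, rs1, rs2):    rd = (rs1 << N) + rs2               (N in {1,2,3})
//   ShNAddUw(rd, rs1, rs2):  rd = (ZeroExtend32(rs1) << N) + rs2
//   SlliUw(rd, rs1, shamt):  rd = ZeroExtend32(rs1) << shamt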
@@ -1083,100 +1256,124 @@ void Riscv64Assembler::SlliUw(XRegister rd, XRegister rs1, int32_t shamt) {
////////////////////////////// RV64 "Zbb" Instructions START /////////////////////////////
void Riscv64Assembler::Andn(XRegister rd, XRegister rs1, XRegister rs2) {
+ AssertExtensionsEnabled(Riscv64Extension::kZbb);
EmitR(0x20, rs2, rs1, 0x7, rd, 0x33);
}
void Riscv64Assembler::Orn(XRegister rd, XRegister rs1, XRegister rs2) {
+ AssertExtensionsEnabled(Riscv64Extension::kZbb);
EmitR(0x20, rs2, rs1, 0x6, rd, 0x33);
}
void Riscv64Assembler::Xnor(XRegister rd, XRegister rs1, XRegister rs2) {
+ AssertExtensionsEnabled(Riscv64Extension::kZbb);
EmitR(0x20, rs2, rs1, 0x4, rd, 0x33);
}
void Riscv64Assembler::Clz(XRegister rd, XRegister rs1) {
+ AssertExtensionsEnabled(Riscv64Extension::kZbb);
EmitR(0x30, 0x0, rs1, 0x1, rd, 0x13);
}
void Riscv64Assembler::Clzw(XRegister rd, XRegister rs1) {
+ AssertExtensionsEnabled(Riscv64Extension::kZbb);
EmitR(0x30, 0x0, rs1, 0x1, rd, 0x1b);
}
void Riscv64Assembler::Ctz(XRegister rd, XRegister rs1) {
+ AssertExtensionsEnabled(Riscv64Extension::kZbb);
EmitR(0x30, 0x1, rs1, 0x1, rd, 0x13);
}
void Riscv64Assembler::Ctzw(XRegister rd, XRegister rs1) {
+ AssertExtensionsEnabled(Riscv64Extension::kZbb);
EmitR(0x30, 0x1, rs1, 0x1, rd, 0x1b);
}
void Riscv64Assembler::Cpop(XRegister rd, XRegister rs1) {
+ AssertExtensionsEnabled(Riscv64Extension::kZbb);
EmitR(0x30, 0x2, rs1, 0x1, rd, 0x13);
}
void Riscv64Assembler::Cpopw(XRegister rd, XRegister rs1) {
+ AssertExtensionsEnabled(Riscv64Extension::kZbb);
EmitR(0x30, 0x2, rs1, 0x1, rd, 0x1b);
}
void Riscv64Assembler::Min(XRegister rd, XRegister rs1, XRegister rs2) {
+ AssertExtensionsEnabled(Riscv64Extension::kZbb);
EmitR(0x5, rs2, rs1, 0x4, rd, 0x33);
}
void Riscv64Assembler::Minu(XRegister rd, XRegister rs1, XRegister rs2) {
+ AssertExtensionsEnabled(Riscv64Extension::kZbb);
EmitR(0x5, rs2, rs1, 0x5, rd, 0x33);
}
void Riscv64Assembler::Max(XRegister rd, XRegister rs1, XRegister rs2) {
+ AssertExtensionsEnabled(Riscv64Extension::kZbb);
EmitR(0x5, rs2, rs1, 0x6, rd, 0x33);
}
void Riscv64Assembler::Maxu(XRegister rd, XRegister rs1, XRegister rs2) {
+ AssertExtensionsEnabled(Riscv64Extension::kZbb);
EmitR(0x5, rs2, rs1, 0x7, rd, 0x33);
}
void Riscv64Assembler::Rol(XRegister rd, XRegister rs1, XRegister rs2) {
+ AssertExtensionsEnabled(Riscv64Extension::kZbb);
EmitR(0x30, rs2, rs1, 0x1, rd, 0x33);
}
void Riscv64Assembler::Rolw(XRegister rd, XRegister rs1, XRegister rs2) {
+ AssertExtensionsEnabled(Riscv64Extension::kZbb);
EmitR(0x30, rs2, rs1, 0x1, rd, 0x3b);
}
void Riscv64Assembler::Ror(XRegister rd, XRegister rs1, XRegister rs2) {
+ AssertExtensionsEnabled(Riscv64Extension::kZbb);
EmitR(0x30, rs2, rs1, 0x5, rd, 0x33);
}
void Riscv64Assembler::Rorw(XRegister rd, XRegister rs1, XRegister rs2) {
+ AssertExtensionsEnabled(Riscv64Extension::kZbb);
EmitR(0x30, rs2, rs1, 0x5, rd, 0x3b);
}
void Riscv64Assembler::Rori(XRegister rd, XRegister rs1, int32_t shamt) {
+ AssertExtensionsEnabled(Riscv64Extension::kZbb);
CHECK_LT(static_cast<uint32_t>(shamt), 64u);
EmitI6(0x18, shamt, rs1, 0x5, rd, 0x13);
}
void Riscv64Assembler::Roriw(XRegister rd, XRegister rs1, int32_t shamt) {
+ AssertExtensionsEnabled(Riscv64Extension::kZbb);
CHECK_LT(static_cast<uint32_t>(shamt), 32u);
EmitI6(0x18, shamt, rs1, 0x5, rd, 0x1b);
}
void Riscv64Assembler::OrcB(XRegister rd, XRegister rs1) {
+ AssertExtensionsEnabled(Riscv64Extension::kZbb);
EmitR(0x14, 0x7, rs1, 0x5, rd, 0x13);
}
void Riscv64Assembler::Rev8(XRegister rd, XRegister rs1) {
+ AssertExtensionsEnabled(Riscv64Extension::kZbb);
EmitR(0x35, 0x18, rs1, 0x5, rd, 0x13);
}
void Riscv64Assembler::ZbbSextB(XRegister rd, XRegister rs1) {
+ AssertExtensionsEnabled(Riscv64Extension::kZbb);
EmitR(0x30, 0x4, rs1, 0x1, rd, 0x13);
}
void Riscv64Assembler::ZbbSextH(XRegister rd, XRegister rs1) {
+ AssertExtensionsEnabled(Riscv64Extension::kZbb);
EmitR(0x30, 0x5, rs1, 0x1, rd, 0x13);
}
void Riscv64Assembler::ZbbZextH(XRegister rd, XRegister rs1) {
+ AssertExtensionsEnabled(Riscv64Extension::kZbb);
EmitR(0x4, 0x0, rs1, 0x4, rd, 0x3b);
}
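A semantics summary for the Zbb emitters above, per the Zbb spec; the Zbb prefix on the sign/zero-extension forms keeps them distinct from the compressed Zcb aliases earlier in this file:

//   Andn/Orn/Xnor:  rd = rs1 & ~rs2, rs1 | ~rs2, ~(rs1 ^ rs2)
//   Clz/Ctz/Cpop:   count leading zeros / trailing zeros / set bits
//                   (the *w forms operate on the low 32 bits of rs1)
//   Min(u)/Max(u):  signed/unsigned minimum and maximum
//   Rol/Ror/Rori:   rotates; Rolw/Rorw/Roriw are the 32-bit forms
//   OrcB:           each byte becomes 0x00 if it was zero, 0xff otherwise
//   Rev8:           reverses the byte order of the 64-bit register
//   ZbbSextB/H, ZbbZextH: sign/zero-extend the low 8/16 bits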
@@ -1185,17 +1382,20 @@ void Riscv64Assembler::ZbbZextH(XRegister rd, XRegister rs1) {
/////////////////////////////// RVV "VSet" Instructions START ////////////////////////////
void Riscv64Assembler::VSetvli(XRegister rd, XRegister rs1, uint32_t vtypei) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK(IsUint<11>(vtypei));
EmitI(vtypei, rs1, enum_cast<uint32_t>(VAIEncoding::kOPCFG), rd, 0x57);
}
void Riscv64Assembler::VSetivli(XRegister rd, uint32_t uimm, uint32_t vtypei) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK(IsUint<10>(vtypei));
DCHECK(IsUint<5>(uimm));
EmitI((~0U << 10 | vtypei), uimm, enum_cast<uint32_t>(VAIEncoding::kOPCFG), rd, 0x57);
}
void Riscv64Assembler::VSetvl(XRegister rd, XRegister rs1, XRegister rs2) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
EmitR(0x40, rs2, rs1, enum_cast<uint32_t>(VAIEncoding::kOPCFG), rd, 0x57);
}
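The vtypei immediate that the DCHECKs above range-check follows the RVV 1.0 vtype layout: vlmul in bits [2:0], vsew in bits [5:3], vta in bit 6, vma in bit 7. A composing helper as an illustrative sketch (the field layout is from the spec; the helper itself is not part of this patch):

// Illustrative only, following the RVV 1.0 vtype field layout.
constexpr uint32_t MakeVTypeI(uint32_t vsew, uint32_t vlmul, bool vta, bool vma) {
  return (static_cast<uint32_t>(vma) << 7) | (static_cast<uint32_t>(vta) << 6) |
         (vsew << 3) | vlmul;
}
// e.g. SEW=32 (vsew=0b010), LMUL=1 (vlmul=0b000), tail- and mask-agnostic:
//   VSetvli(A0, A1, MakeVTypeI(0b010, 0b000, /*vta=*/true, /*vma=*/true));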
@@ -1204,1698 +1404,2004 @@ void Riscv64Assembler::VSetvl(XRegister rd, XRegister rs1, XRegister rs2) {
/////////////////////////////// RVV Load/Store Instructions START ////////////////////////////
void Riscv64Assembler::VLe8(VRegister vd, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k1, 0x0, MemAddressMode::kUnitStride, vm);
EmitR(funct7, 0b00000, rs1, enum_cast<uint32_t>(VectorWidth::k8), vd, 0x7);
}
void Riscv64Assembler::VLe16(VRegister vd, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k1, 0x0, MemAddressMode::kUnitStride, vm);
EmitR(funct7, 0b00000, rs1, enum_cast<uint32_t>(VectorWidth::k16), vd, 0x7);
}
void Riscv64Assembler::VLe32(VRegister vd, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k1, 0x0, MemAddressMode::kUnitStride, vm);
EmitR(funct7, 0b00000, rs1, enum_cast<uint32_t>(VectorWidth::k32), vd, 0x7);
}
void Riscv64Assembler::VLe64(VRegister vd, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k1, 0x0, MemAddressMode::kUnitStride, vm);
EmitR(funct7, 0b00000, rs1, enum_cast<uint32_t>(VectorWidth::k64), vd, 0x7);
}
void Riscv64Assembler::VSe8(VRegister vs3, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k1, 0x0, MemAddressMode::kUnitStride, vm);
EmitR(funct7, 0b00000, rs1, enum_cast<uint32_t>(VectorWidth::k8), vs3, 0x27);
}
void Riscv64Assembler::VSe16(VRegister vs3, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k1, 0x0, MemAddressMode::kUnitStride, vm);
EmitR(funct7, 0b00000, rs1, enum_cast<uint32_t>(VectorWidth::k16), vs3, 0x27);
}
void Riscv64Assembler::VSe32(VRegister vs3, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k1, 0x0, MemAddressMode::kUnitStride, vm);
EmitR(funct7, 0b00000, rs1, enum_cast<uint32_t>(VectorWidth::k32), vs3, 0x27);
}
void Riscv64Assembler::VSe64(VRegister vs3, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k1, 0x0, MemAddressMode::kUnitStride, vm);
EmitR(funct7, 0b00000, rs1, enum_cast<uint32_t>(VectorWidth::k64), vs3, 0x27);
}
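The eight unit-stride emitters above differ only in the VectorWidth passed to EmitR; the rs2 field is hard-wired to 0b00000, the plain unit-stride lumop/sumop encoding. A masked usage sketch (illustrative; register choices arbitrary):

// Illustrative only. With VM::kV0_t the mask lives in v0, which is why the
// DCHECK above forbids vd == V0 for masked loads.
assembler.VLe32(V8, A0, VM::kV0_t);  // v8[i] = mem32[a0 + 4*i] where v0.mask[i]
assembler.VSe32(V8, A1, VM::kV0_t);  // mem32[a1 + 4*i] = v8[i] where v0.mask[i]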
void Riscv64Assembler::VLm(VRegister vd, XRegister rs1) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k1, 0x0, MemAddressMode::kUnitStride, VM::kUnmasked);
EmitR(funct7, 0b01011, rs1, enum_cast<uint32_t>(VectorWidth::kMask), vd, 0x7);
}
void Riscv64Assembler::VSm(VRegister vs3, XRegister rs1) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k1, 0x0, MemAddressMode::kUnitStride, VM::kUnmasked);
EmitR(funct7, 0b01011, rs1, enum_cast<uint32_t>(VectorWidth::kMask), vs3, 0x27);
}
void Riscv64Assembler::VLe8ff(VRegister vd, XRegister rs1) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k1, 0x0, MemAddressMode::kUnitStride, VM::kUnmasked);
EmitR(funct7, 0b10000, rs1, enum_cast<uint32_t>(VectorWidth::k8), vd, 0x7);
}
void Riscv64Assembler::VLe16ff(VRegister vd, XRegister rs1) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k1, 0x0, MemAddressMode::kUnitStride, VM::kUnmasked);
EmitR(funct7, 0b10000, rs1, enum_cast<uint32_t>(VectorWidth::k16), vd, 0x7);
}
void Riscv64Assembler::VLe32ff(VRegister vd, XRegister rs1) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k1, 0x0, MemAddressMode::kUnitStride, VM::kUnmasked);
EmitR(funct7, 0b10000, rs1, enum_cast<uint32_t>(VectorWidth::k32), vd, 0x7);
}
void Riscv64Assembler::VLe64ff(VRegister vd, XRegister rs1) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k1, 0x0, MemAddressMode::kUnitStride, VM::kUnmasked);
EmitR(funct7, 0b10000, rs1, enum_cast<uint32_t>(VectorWidth::k64), vd, 0x7);
}
void Riscv64Assembler::VLse8(VRegister vd, XRegister rs1, XRegister rs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k1, 0x0, MemAddressMode::kStrided, vm);
EmitR(funct7, rs2, rs1, enum_cast<uint32_t>(VectorWidth::k8), vd, 0x7);
}
void Riscv64Assembler::VLse16(VRegister vd, XRegister rs1, XRegister rs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k1, 0x0, MemAddressMode::kStrided, vm);
EmitR(funct7, rs2, rs1, enum_cast<uint32_t>(VectorWidth::k16), vd, 0x7);
}
void Riscv64Assembler::VLse32(VRegister vd, XRegister rs1, XRegister rs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k1, 0x0, MemAddressMode::kStrided, vm);
EmitR(funct7, rs2, rs1, enum_cast<uint32_t>(VectorWidth::k32), vd, 0x7);
}
void Riscv64Assembler::VLse64(VRegister vd, XRegister rs1, XRegister rs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k1, 0x0, MemAddressMode::kStrided, vm);
EmitR(funct7, rs2, rs1, enum_cast<uint32_t>(VectorWidth::k64), vd, 0x7);
}
void Riscv64Assembler::VSse8(VRegister vs3, XRegister rs1, XRegister rs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k1, 0x0, MemAddressMode::kStrided, vm);
EmitR(funct7, rs2, rs1, enum_cast<uint32_t>(VectorWidth::k8), vs3, 0x27);
}
void Riscv64Assembler::VSse16(VRegister vs3, XRegister rs1, XRegister rs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k1, 0x0, MemAddressMode::kStrided, vm);
EmitR(funct7, rs2, rs1, enum_cast<uint32_t>(VectorWidth::k16), vs3, 0x27);
}
void Riscv64Assembler::VSse32(VRegister vs3, XRegister rs1, XRegister rs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k1, 0x0, MemAddressMode::kStrided, vm);
EmitR(funct7, rs2, rs1, enum_cast<uint32_t>(VectorWidth::k32), vs3, 0x27);
}
void Riscv64Assembler::VSse64(VRegister vs3, XRegister rs1, XRegister rs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k1, 0x0, MemAddressMode::kStrided, vm);
EmitR(funct7, rs2, rs1, enum_cast<uint32_t>(VectorWidth::k64), vs3, 0x27);
}
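In the strided forms above, rs2 is a scalar register holding the byte stride between consecutive elements (a stride of zero is legal and repeats the same address):

//   VLse32(vd, rs1, rs2, vm):  vd[i] = mem32[rs1 + i * rs2]
//   VSse32(vs3, rs1, rs2, vm): mem32[rs1 + i * rs2] = vs3[i]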
void Riscv64Assembler::VLoxei8(VRegister vd, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k1, 0x0, MemAddressMode::kIndexedOrdered, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VectorWidth::k8), vd, 0x7);
}
void Riscv64Assembler::VLoxei16(VRegister vd, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k1, 0x0, MemAddressMode::kIndexedOrdered, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VectorWidth::k16), vd, 0x7);
}
void Riscv64Assembler::VLoxei32(VRegister vd, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k1, 0x0, MemAddressMode::kIndexedOrdered, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VectorWidth::k32), vd, 0x7);
}
void Riscv64Assembler::VLoxei64(VRegister vd, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k1, 0x0, MemAddressMode::kIndexedOrdered, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VectorWidth::k64), vd, 0x7);
}
void Riscv64Assembler::VLuxei8(VRegister vd, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k1, 0x0, MemAddressMode::kIndexedUnordered, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VectorWidth::k8), vd, 0x7);
}
void Riscv64Assembler::VLuxei16(VRegister vd, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k1, 0x0, MemAddressMode::kIndexedUnordered, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VectorWidth::k16), vd, 0x7);
}
void Riscv64Assembler::VLuxei32(VRegister vd, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k1, 0x0, MemAddressMode::kIndexedUnordered, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VectorWidth::k32), vd, 0x7);
}
void Riscv64Assembler::VLuxei64(VRegister vd, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k1, 0x0, MemAddressMode::kIndexedUnordered, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VectorWidth::k64), vd, 0x7);
}
void Riscv64Assembler::VSoxei8(VRegister vs3, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k1, 0x0, MemAddressMode::kIndexedOrdered, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VectorWidth::k8), vs3, 0x27);
}
void Riscv64Assembler::VSoxei16(VRegister vs3, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k1, 0x0, MemAddressMode::kIndexedOrdered, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VectorWidth::k16), vs3, 0x27);
}
void Riscv64Assembler::VSoxei32(VRegister vs3, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k1, 0x0, MemAddressMode::kIndexedOrdered, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VectorWidth::k32), vs3, 0x27);
}
void Riscv64Assembler::VSoxei64(VRegister vs3, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k1, 0x0, MemAddressMode::kIndexedOrdered, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VectorWidth::k64), vs3, 0x27);
}
void Riscv64Assembler::VSuxei8(VRegister vs3, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k1, 0x0, MemAddressMode::kIndexedUnordered, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VectorWidth::k8), vs3, 0x27);
}
void Riscv64Assembler::VSuxei16(VRegister vs3, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k1, 0x0, MemAddressMode::kIndexedUnordered, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VectorWidth::k16), vs3, 0x27);
}
void Riscv64Assembler::VSuxei32(VRegister vs3, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k1, 0x0, MemAddressMode::kIndexedUnordered, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VectorWidth::k32), vs3, 0x27);
}
void Riscv64Assembler::VSuxei64(VRegister vs3, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k1, 0x0, MemAddressMode::kIndexedUnordered, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VectorWidth::k64), vs3, 0x27);
}
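The indexed forms take per-element byte offsets from vs2 rather than a scalar stride. Note that the 8/16/32/64 suffix is the width of the index elements in vs2; the data width comes from SEW. The ordered variants (VLoxei/VSoxei) must perform element accesses in order, which matters when offsets alias, while the unordered variants (VLuxei/VSuxei) let the hardware reorder:

//   VLoxei32(vd, rs1, vs2, vm): vd[i] = mem_SEW[rs1 + vs2[i]], 32-bit indices,
//                               element accesses performed in order
//   VLuxei32(vd, rs1, vs2, vm): same addressing; accesses may be reordered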
void Riscv64Assembler::VLseg2e8(VRegister vd, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k2, 0x0, MemAddressMode::kUnitStride, vm);
EmitR(funct7, 0b00000u, rs1, enum_cast<uint32_t>(VectorWidth::k8), vd, 0x7);
}
void Riscv64Assembler::VLseg2e16(VRegister vd, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k2, 0x0, MemAddressMode::kUnitStride, vm);
EmitR(funct7, 0b00000u, rs1, enum_cast<uint32_t>(VectorWidth::k16), vd, 0x7);
}
void Riscv64Assembler::VLseg2e32(VRegister vd, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k2, 0x0, MemAddressMode::kUnitStride, vm);
EmitR(funct7, 0b00000u, rs1, enum_cast<uint32_t>(VectorWidth::k32), vd, 0x7);
}
void Riscv64Assembler::VLseg2e64(VRegister vd, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k2, 0x0, MemAddressMode::kUnitStride, vm);
EmitR(funct7, 0b00000u, rs1, enum_cast<uint32_t>(VectorWidth::k64), vd, 0x7);
}
void Riscv64Assembler::VLseg3e8(VRegister vd, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k3, 0x0, MemAddressMode::kUnitStride, vm);
EmitR(funct7, 0b00000u, rs1, enum_cast<uint32_t>(VectorWidth::k8), vd, 0x7);
}
void Riscv64Assembler::VLseg3e16(VRegister vd, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k3, 0x0, MemAddressMode::kUnitStride, vm);
EmitR(funct7, 0b00000u, rs1, enum_cast<uint32_t>(VectorWidth::k16), vd, 0x7);
}
void Riscv64Assembler::VLseg3e32(VRegister vd, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k3, 0x0, MemAddressMode::kUnitStride, vm);
EmitR(funct7, 0b00000u, rs1, enum_cast<uint32_t>(VectorWidth::k32), vd, 0x7);
}
void Riscv64Assembler::VLseg3e64(VRegister vd, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k3, 0x0, MemAddressMode::kUnitStride, vm);
EmitR(funct7, 0b00000u, rs1, enum_cast<uint32_t>(VectorWidth::k64), vd, 0x7);
}
void Riscv64Assembler::VLseg4e8(VRegister vd, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k4, 0x0, MemAddressMode::kUnitStride, vm);
EmitR(funct7, 0b00000u, rs1, enum_cast<uint32_t>(VectorWidth::k8), vd, 0x7);
}
void Riscv64Assembler::VLseg4e16(VRegister vd, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k4, 0x0, MemAddressMode::kUnitStride, vm);
EmitR(funct7, 0b00000u, rs1, enum_cast<uint32_t>(VectorWidth::k16), vd, 0x7);
}
void Riscv64Assembler::VLseg4e32(VRegister vd, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k4, 0x0, MemAddressMode::kUnitStride, vm);
EmitR(funct7, 0b00000u, rs1, enum_cast<uint32_t>(VectorWidth::k32), vd, 0x7);
}
void Riscv64Assembler::VLseg4e64(VRegister vd, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k4, 0x0, MemAddressMode::kUnitStride, vm);
EmitR(funct7, 0b00000u, rs1, enum_cast<uint32_t>(VectorWidth::k64), vd, 0x7);
}
void Riscv64Assembler::VLseg5e8(VRegister vd, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k5, 0x0, MemAddressMode::kUnitStride, vm);
EmitR(funct7, 0b00000u, rs1, enum_cast<uint32_t>(VectorWidth::k8), vd, 0x7);
}
void Riscv64Assembler::VLseg5e16(VRegister vd, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k5, 0x0, MemAddressMode::kUnitStride, vm);
EmitR(funct7, 0b00000u, rs1, enum_cast<uint32_t>(VectorWidth::k16), vd, 0x7);
}
void Riscv64Assembler::VLseg5e32(VRegister vd, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k5, 0x0, MemAddressMode::kUnitStride, vm);
EmitR(funct7, 0b00000u, rs1, enum_cast<uint32_t>(VectorWidth::k32), vd, 0x7);
}
void Riscv64Assembler::VLseg5e64(VRegister vd, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k5, 0x0, MemAddressMode::kUnitStride, vm);
EmitR(funct7, 0b00000u, rs1, enum_cast<uint32_t>(VectorWidth::k64), vd, 0x7);
}
void Riscv64Assembler::VLseg6e8(VRegister vd, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k6, 0x0, MemAddressMode::kUnitStride, vm);
EmitR(funct7, 0b00000u, rs1, enum_cast<uint32_t>(VectorWidth::k8), vd, 0x7);
}
void Riscv64Assembler::VLseg6e16(VRegister vd, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k6, 0x0, MemAddressMode::kUnitStride, vm);
EmitR(funct7, 0b00000u, rs1, enum_cast<uint32_t>(VectorWidth::k16), vd, 0x7);
}
void Riscv64Assembler::VLseg6e32(VRegister vd, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k6, 0x0, MemAddressMode::kUnitStride, vm);
EmitR(funct7, 0b00000u, rs1, enum_cast<uint32_t>(VectorWidth::k32), vd, 0x7);
}
void Riscv64Assembler::VLseg6e64(VRegister vd, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k6, 0x0, MemAddressMode::kUnitStride, vm);
EmitR(funct7, 0b00000u, rs1, enum_cast<uint32_t>(VectorWidth::k64), vd, 0x7);
}
void Riscv64Assembler::VLseg7e8(VRegister vd, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k7, 0x0, MemAddressMode::kUnitStride, vm);
EmitR(funct7, 0b00000u, rs1, enum_cast<uint32_t>(VectorWidth::k8), vd, 0x7);
}
void Riscv64Assembler::VLseg7e16(VRegister vd, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k7, 0x0, MemAddressMode::kUnitStride, vm);
EmitR(funct7, 0b00000u, rs1, enum_cast<uint32_t>(VectorWidth::k16), vd, 0x7);
}
void Riscv64Assembler::VLseg7e32(VRegister vd, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k7, 0x0, MemAddressMode::kUnitStride, vm);
EmitR(funct7, 0b00000u, rs1, enum_cast<uint32_t>(VectorWidth::k32), vd, 0x7);
}
void Riscv64Assembler::VLseg7e64(VRegister vd, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k7, 0x0, MemAddressMode::kUnitStride, vm);
EmitR(funct7, 0b00000u, rs1, enum_cast<uint32_t>(VectorWidth::k64), vd, 0x7);
}
void Riscv64Assembler::VLseg8e8(VRegister vd, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k8, 0x0, MemAddressMode::kUnitStride, vm);
EmitR(funct7, 0b00000u, rs1, enum_cast<uint32_t>(VectorWidth::k8), vd, 0x7);
}
void Riscv64Assembler::VLseg8e16(VRegister vd, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k8, 0x0, MemAddressMode::kUnitStride, vm);
EmitR(funct7, 0b00000u, rs1, enum_cast<uint32_t>(VectorWidth::k16), vd, 0x7);
}
void Riscv64Assembler::VLseg8e32(VRegister vd, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k8, 0x0, MemAddressMode::kUnitStride, vm);
EmitR(funct7, 0b00000u, rs1, enum_cast<uint32_t>(VectorWidth::k32), vd, 0x7);
}
void Riscv64Assembler::VLseg8e64(VRegister vd, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k8, 0x0, MemAddressMode::kUnitStride, vm);
EmitR(funct7, 0b00000u, rs1, enum_cast<uint32_t>(VectorWidth::k64), vd, 0x7);
}
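The VLsegN* family differs from VLe* only in the Nf field of funct7: each element position loads N consecutive fields into N adjacent vector registers, de-interleaving an array of structs in a single instruction:

//   VLseg3e32(V8, A0, vm): for each active i,
//     v8[i]  = mem32[a0 + 12*i + 0]
//     v9[i]  = mem32[a0 + 12*i + 4]
//     v10[i] = mem32[a0 + 12*i + 8]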
void Riscv64Assembler::VSseg2e8(VRegister vs3, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k2, 0x0, MemAddressMode::kUnitStride, vm);
EmitR(funct7, 0b00000u, rs1, enum_cast<uint32_t>(VectorWidth::k8), vs3, 0x27);
}
void Riscv64Assembler::VSseg2e16(VRegister vs3, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k2, 0x0, MemAddressMode::kUnitStride, vm);
EmitR(funct7, 0b00000u, rs1, enum_cast<uint32_t>(VectorWidth::k16), vs3, 0x27);
}
void Riscv64Assembler::VSseg2e32(VRegister vs3, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k2, 0x0, MemAddressMode::kUnitStride, vm);
EmitR(funct7, 0b00000u, rs1, enum_cast<uint32_t>(VectorWidth::k32), vs3, 0x27);
}
void Riscv64Assembler::VSseg2e64(VRegister vs3, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k2, 0x0, MemAddressMode::kUnitStride, vm);
EmitR(funct7, 0b00000u, rs1, enum_cast<uint32_t>(VectorWidth::k64), vs3, 0x27);
}
void Riscv64Assembler::VSseg3e8(VRegister vs3, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k3, 0x0, MemAddressMode::kUnitStride, vm);
EmitR(funct7, 0b00000u, rs1, enum_cast<uint32_t>(VectorWidth::k8), vs3, 0x27);
}
void Riscv64Assembler::VSseg3e16(VRegister vs3, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k3, 0x0, MemAddressMode::kUnitStride, vm);
EmitR(funct7, 0b00000u, rs1, enum_cast<uint32_t>(VectorWidth::k16), vs3, 0x27);
}
void Riscv64Assembler::VSseg3e32(VRegister vs3, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k3, 0x0, MemAddressMode::kUnitStride, vm);
EmitR(funct7, 0b00000u, rs1, enum_cast<uint32_t>(VectorWidth::k32), vs3, 0x27);
}
void Riscv64Assembler::VSseg3e64(VRegister vs3, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k3, 0x0, MemAddressMode::kUnitStride, vm);
EmitR(funct7, 0b00000u, rs1, enum_cast<uint32_t>(VectorWidth::k64), vs3, 0x27);
}
void Riscv64Assembler::VSseg4e8(VRegister vs3, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k4, 0x0, MemAddressMode::kUnitStride, vm);
EmitR(funct7, 0b00000u, rs1, enum_cast<uint32_t>(VectorWidth::k8), vs3, 0x27);
}
void Riscv64Assembler::VSseg4e16(VRegister vs3, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k4, 0x0, MemAddressMode::kUnitStride, vm);
EmitR(funct7, 0b00000u, rs1, enum_cast<uint32_t>(VectorWidth::k16), vs3, 0x27);
}
void Riscv64Assembler::VSseg4e32(VRegister vs3, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k4, 0x0, MemAddressMode::kUnitStride, vm);
EmitR(funct7, 0b00000u, rs1, enum_cast<uint32_t>(VectorWidth::k32), vs3, 0x27);
}
void Riscv64Assembler::VSseg4e64(VRegister vs3, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k4, 0x0, MemAddressMode::kUnitStride, vm);
EmitR(funct7, 0b00000u, rs1, enum_cast<uint32_t>(VectorWidth::k64), vs3, 0x27);
}
void Riscv64Assembler::VSseg5e8(VRegister vs3, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k5, 0x0, MemAddressMode::kUnitStride, vm);
EmitR(funct7, 0b00000u, rs1, enum_cast<uint32_t>(VectorWidth::k8), vs3, 0x27);
}
void Riscv64Assembler::VSseg5e16(VRegister vs3, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k5, 0x0, MemAddressMode::kUnitStride, vm);
EmitR(funct7, 0b00000u, rs1, enum_cast<uint32_t>(VectorWidth::k16), vs3, 0x27);
}
void Riscv64Assembler::VSseg5e32(VRegister vs3, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k5, 0x0, MemAddressMode::kUnitStride, vm);
EmitR(funct7, 0b00000u, rs1, enum_cast<uint32_t>(VectorWidth::k32), vs3, 0x27);
}
void Riscv64Assembler::VSseg5e64(VRegister vs3, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k5, 0x0, MemAddressMode::kUnitStride, vm);
EmitR(funct7, 0b00000u, rs1, enum_cast<uint32_t>(VectorWidth::k64), vs3, 0x27);
}
void Riscv64Assembler::VSseg6e8(VRegister vs3, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k6, 0x0, MemAddressMode::kUnitStride, vm);
EmitR(funct7, 0b00000u, rs1, enum_cast<uint32_t>(VectorWidth::k8), vs3, 0x27);
}
void Riscv64Assembler::VSseg6e16(VRegister vs3, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k6, 0x0, MemAddressMode::kUnitStride, vm);
EmitR(funct7, 0b00000u, rs1, enum_cast<uint32_t>(VectorWidth::k16), vs3, 0x27);
}
void Riscv64Assembler::VSseg6e32(VRegister vs3, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k6, 0x0, MemAddressMode::kUnitStride, vm);
EmitR(funct7, 0b00000u, rs1, enum_cast<uint32_t>(VectorWidth::k32), vs3, 0x27);
}
void Riscv64Assembler::VSseg6e64(VRegister vs3, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k6, 0x0, MemAddressMode::kUnitStride, vm);
EmitR(funct7, 0b00000u, rs1, enum_cast<uint32_t>(VectorWidth::k64), vs3, 0x27);
}
void Riscv64Assembler::VSseg7e8(VRegister vs3, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k7, 0x0, MemAddressMode::kUnitStride, vm);
EmitR(funct7, 0b00000u, rs1, enum_cast<uint32_t>(VectorWidth::k8), vs3, 0x27);
}
void Riscv64Assembler::VSseg7e16(VRegister vs3, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k7, 0x0, MemAddressMode::kUnitStride, vm);
EmitR(funct7, 0b00000u, rs1, enum_cast<uint32_t>(VectorWidth::k16), vs3, 0x27);
}
void Riscv64Assembler::VSseg7e32(VRegister vs3, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k7, 0x0, MemAddressMode::kUnitStride, vm);
EmitR(funct7, 0b00000u, rs1, enum_cast<uint32_t>(VectorWidth::k32), vs3, 0x27);
}
void Riscv64Assembler::VSseg7e64(VRegister vs3, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k7, 0x0, MemAddressMode::kUnitStride, vm);
EmitR(funct7, 0b00000u, rs1, enum_cast<uint32_t>(VectorWidth::k64), vs3, 0x27);
}
void Riscv64Assembler::VSseg8e8(VRegister vs3, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k8, 0x0, MemAddressMode::kUnitStride, vm);
EmitR(funct7, 0b00000u, rs1, enum_cast<uint32_t>(VectorWidth::k8), vs3, 0x27);
}
void Riscv64Assembler::VSseg8e16(VRegister vs3, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k8, 0x0, MemAddressMode::kUnitStride, vm);
EmitR(funct7, 0b00000u, rs1, enum_cast<uint32_t>(VectorWidth::k16), vs3, 0x27);
}
void Riscv64Assembler::VSseg8e32(VRegister vs3, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k8, 0x0, MemAddressMode::kUnitStride, vm);
EmitR(funct7, 0b00000u, rs1, enum_cast<uint32_t>(VectorWidth::k32), vs3, 0x27);
}
void Riscv64Assembler::VSseg8e64(VRegister vs3, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k8, 0x0, MemAddressMode::kUnitStride, vm);
EmitR(funct7, 0b00000u, rs1, enum_cast<uint32_t>(VectorWidth::k64), vs3, 0x27);
}
void Riscv64Assembler::VLseg2e8ff(VRegister vd, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k2, 0x0, MemAddressMode::kUnitStride, vm);
EmitR(funct7, 0b10000u, rs1, enum_cast<uint32_t>(VectorWidth::k8), vd, 0x7);
}
void Riscv64Assembler::VLseg2e16ff(VRegister vd, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k2, 0x0, MemAddressMode::kUnitStride, vm);
EmitR(funct7, 0b10000u, rs1, enum_cast<uint32_t>(VectorWidth::k16), vd, 0x7);
}
void Riscv64Assembler::VLseg2e32ff(VRegister vd, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k2, 0x0, MemAddressMode::kUnitStride, vm);
EmitR(funct7, 0b10000u, rs1, enum_cast<uint32_t>(VectorWidth::k32), vd, 0x7);
}
void Riscv64Assembler::VLseg2e64ff(VRegister vd, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k2, 0x0, MemAddressMode::kUnitStride, vm);
EmitR(funct7, 0b10000u, rs1, enum_cast<uint32_t>(VectorWidth::k64), vd, 0x7);
}
void Riscv64Assembler::VLseg3e8ff(VRegister vd, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k3, 0x0, MemAddressMode::kUnitStride, vm);
EmitR(funct7, 0b10000u, rs1, enum_cast<uint32_t>(VectorWidth::k8), vd, 0x7);
}
void Riscv64Assembler::VLseg3e16ff(VRegister vd, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k3, 0x0, MemAddressMode::kUnitStride, vm);
EmitR(funct7, 0b10000u, rs1, enum_cast<uint32_t>(VectorWidth::k16), vd, 0x7);
}
void Riscv64Assembler::VLseg3e32ff(VRegister vd, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k3, 0x0, MemAddressMode::kUnitStride, vm);
EmitR(funct7, 0b10000u, rs1, enum_cast<uint32_t>(VectorWidth::k32), vd, 0x7);
}
void Riscv64Assembler::VLseg3e64ff(VRegister vd, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k3, 0x0, MemAddressMode::kUnitStride, vm);
EmitR(funct7, 0b10000u, rs1, enum_cast<uint32_t>(VectorWidth::k64), vd, 0x7);
}
void Riscv64Assembler::VLseg4e8ff(VRegister vd, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k4, 0x0, MemAddressMode::kUnitStride, vm);
EmitR(funct7, 0b10000u, rs1, enum_cast<uint32_t>(VectorWidth::k8), vd, 0x7);
}
void Riscv64Assembler::VLseg4e16ff(VRegister vd, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k4, 0x0, MemAddressMode::kUnitStride, vm);
EmitR(funct7, 0b10000u, rs1, enum_cast<uint32_t>(VectorWidth::k16), vd, 0x7);
}
void Riscv64Assembler::VLseg4e32ff(VRegister vd, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k4, 0x0, MemAddressMode::kUnitStride, vm);
EmitR(funct7, 0b10000u, rs1, enum_cast<uint32_t>(VectorWidth::k32), vd, 0x7);
}
void Riscv64Assembler::VLseg4e64ff(VRegister vd, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k4, 0x0, MemAddressMode::kUnitStride, vm);
EmitR(funct7, 0b10000u, rs1, enum_cast<uint32_t>(VectorWidth::k64), vd, 0x7);
}
void Riscv64Assembler::VLseg5e8ff(VRegister vd, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k5, 0x0, MemAddressMode::kUnitStride, vm);
EmitR(funct7, 0b10000u, rs1, enum_cast<uint32_t>(VectorWidth::k8), vd, 0x7);
}
void Riscv64Assembler::VLseg5e16ff(VRegister vd, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k5, 0x0, MemAddressMode::kUnitStride, vm);
EmitR(funct7, 0b10000u, rs1, enum_cast<uint32_t>(VectorWidth::k16), vd, 0x7);
}
void Riscv64Assembler::VLseg5e32ff(VRegister vd, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k5, 0x0, MemAddressMode::kUnitStride, vm);
EmitR(funct7, 0b10000u, rs1, enum_cast<uint32_t>(VectorWidth::k32), vd, 0x7);
}
void Riscv64Assembler::VLseg5e64ff(VRegister vd, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k5, 0x0, MemAddressMode::kUnitStride, vm);
EmitR(funct7, 0b10000u, rs1, enum_cast<uint32_t>(VectorWidth::k64), vd, 0x7);
}
void Riscv64Assembler::VLseg6e8ff(VRegister vd, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k6, 0x0, MemAddressMode::kUnitStride, vm);
EmitR(funct7, 0b10000u, rs1, enum_cast<uint32_t>(VectorWidth::k8), vd, 0x7);
}
void Riscv64Assembler::VLseg6e16ff(VRegister vd, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k6, 0x0, MemAddressMode::kUnitStride, vm);
EmitR(funct7, 0b10000u, rs1, enum_cast<uint32_t>(VectorWidth::k16), vd, 0x7);
}
void Riscv64Assembler::VLseg6e32ff(VRegister vd, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k6, 0x0, MemAddressMode::kUnitStride, vm);
EmitR(funct7, 0b10000u, rs1, enum_cast<uint32_t>(VectorWidth::k32), vd, 0x7);
}
void Riscv64Assembler::VLseg6e64ff(VRegister vd, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k6, 0x0, MemAddressMode::kUnitStride, vm);
EmitR(funct7, 0b10000u, rs1, enum_cast<uint32_t>(VectorWidth::k64), vd, 0x7);
}
void Riscv64Assembler::VLseg7e8ff(VRegister vd, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k7, 0x0, MemAddressMode::kUnitStride, vm);
EmitR(funct7, 0b10000u, rs1, enum_cast<uint32_t>(VectorWidth::k8), vd, 0x7);
}
void Riscv64Assembler::VLseg7e16ff(VRegister vd, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k7, 0x0, MemAddressMode::kUnitStride, vm);
EmitR(funct7, 0b10000u, rs1, enum_cast<uint32_t>(VectorWidth::k16), vd, 0x7);
}
void Riscv64Assembler::VLseg7e32ff(VRegister vd, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k7, 0x0, MemAddressMode::kUnitStride, vm);
EmitR(funct7, 0b10000u, rs1, enum_cast<uint32_t>(VectorWidth::k32), vd, 0x7);
}
void Riscv64Assembler::VLseg7e64ff(VRegister vd, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k7, 0x0, MemAddressMode::kUnitStride, vm);
EmitR(funct7, 0b10000u, rs1, enum_cast<uint32_t>(VectorWidth::k64), vd, 0x7);
}
void Riscv64Assembler::VLseg8e8ff(VRegister vd, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k8, 0x0, MemAddressMode::kUnitStride, vm);
EmitR(funct7, 0b10000u, rs1, enum_cast<uint32_t>(VectorWidth::k8), vd, 0x7);
}
void Riscv64Assembler::VLseg8e16ff(VRegister vd, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k8, 0x0, MemAddressMode::kUnitStride, vm);
EmitR(funct7, 0b10000u, rs1, enum_cast<uint32_t>(VectorWidth::k16), vd, 0x7);
}
void Riscv64Assembler::VLseg8e32ff(VRegister vd, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k8, 0x0, MemAddressMode::kUnitStride, vm);
EmitR(funct7, 0b10000u, rs1, enum_cast<uint32_t>(VectorWidth::k32), vd, 0x7);
}
void Riscv64Assembler::VLseg8e64ff(VRegister vd, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k8, 0x0, MemAddressMode::kUnitStride, vm);
EmitR(funct7, 0b10000u, rs1, enum_cast<uint32_t>(VectorWidth::k64), vd, 0x7);
}
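// The fault-only-first ("ff") unit-stride segment loads above all share one
// encoding shape: lumop 0b10000 in the rs2 slot selects the fault-only-first
// variant, the width field carries the element EEW, and opcode 0x7 is the
// LOAD-FP major opcode that RVV reuses for vector loads. A minimal
// hypothetical call site, assuming a Riscv64Assembler instance `asm_`
// configured with kLoadStore and kV enabled:
//   asm_.VLseg8e64ff(V8, A0, VM::kUnmasked);  // vlseg8e64ff.v v8, (a0)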
void Riscv64Assembler::VLsseg2e8(VRegister vd, XRegister rs1, XRegister rs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k2, 0x0, MemAddressMode::kStrided, vm);
EmitR(funct7, rs2, rs1, enum_cast<uint32_t>(VectorWidth::k8), vd, 0x7);
}
void Riscv64Assembler::VLsseg2e16(VRegister vd, XRegister rs1, XRegister rs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k2, 0x0, MemAddressMode::kStrided, vm);
EmitR(funct7, rs2, rs1, enum_cast<uint32_t>(VectorWidth::k16), vd, 0x7);
}
void Riscv64Assembler::VLsseg2e32(VRegister vd, XRegister rs1, XRegister rs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k2, 0x0, MemAddressMode::kStrided, vm);
EmitR(funct7, rs2, rs1, enum_cast<uint32_t>(VectorWidth::k32), vd, 0x7);
}
void Riscv64Assembler::VLsseg2e64(VRegister vd, XRegister rs1, XRegister rs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k2, 0x0, MemAddressMode::kStrided, vm);
EmitR(funct7, rs2, rs1, enum_cast<uint32_t>(VectorWidth::k64), vd, 0x7);
}
void Riscv64Assembler::VLsseg3e8(VRegister vd, XRegister rs1, XRegister rs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k3, 0x0, MemAddressMode::kStrided, vm);
EmitR(funct7, rs2, rs1, enum_cast<uint32_t>(VectorWidth::k8), vd, 0x7);
}
void Riscv64Assembler::VLsseg3e16(VRegister vd, XRegister rs1, XRegister rs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k3, 0x0, MemAddressMode::kStrided, vm);
EmitR(funct7, rs2, rs1, enum_cast<uint32_t>(VectorWidth::k16), vd, 0x7);
}
void Riscv64Assembler::VLsseg3e32(VRegister vd, XRegister rs1, XRegister rs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k3, 0x0, MemAddressMode::kStrided, vm);
EmitR(funct7, rs2, rs1, enum_cast<uint32_t>(VectorWidth::k32), vd, 0x7);
}
void Riscv64Assembler::VLsseg3e64(VRegister vd, XRegister rs1, XRegister rs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k3, 0x0, MemAddressMode::kStrided, vm);
EmitR(funct7, rs2, rs1, enum_cast<uint32_t>(VectorWidth::k64), vd, 0x7);
}
void Riscv64Assembler::VLsseg4e8(VRegister vd, XRegister rs1, XRegister rs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k4, 0x0, MemAddressMode::kStrided, vm);
EmitR(funct7, rs2, rs1, enum_cast<uint32_t>(VectorWidth::k8), vd, 0x7);
}
void Riscv64Assembler::VLsseg4e16(VRegister vd, XRegister rs1, XRegister rs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k4, 0x0, MemAddressMode::kStrided, vm);
EmitR(funct7, rs2, rs1, enum_cast<uint32_t>(VectorWidth::k16), vd, 0x7);
}
void Riscv64Assembler::VLsseg4e32(VRegister vd, XRegister rs1, XRegister rs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k4, 0x0, MemAddressMode::kStrided, vm);
EmitR(funct7, rs2, rs1, enum_cast<uint32_t>(VectorWidth::k32), vd, 0x7);
}
void Riscv64Assembler::VLsseg4e64(VRegister vd, XRegister rs1, XRegister rs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k4, 0x0, MemAddressMode::kStrided, vm);
EmitR(funct7, rs2, rs1, enum_cast<uint32_t>(VectorWidth::k64), vd, 0x7);
}
void Riscv64Assembler::VLsseg5e8(VRegister vd, XRegister rs1, XRegister rs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k5, 0x0, MemAddressMode::kStrided, vm);
EmitR(funct7, rs2, rs1, enum_cast<uint32_t>(VectorWidth::k8), vd, 0x7);
}
void Riscv64Assembler::VLsseg5e16(VRegister vd, XRegister rs1, XRegister rs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k5, 0x0, MemAddressMode::kStrided, vm);
EmitR(funct7, rs2, rs1, enum_cast<uint32_t>(VectorWidth::k16), vd, 0x7);
}
void Riscv64Assembler::VLsseg5e32(VRegister vd, XRegister rs1, XRegister rs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k5, 0x0, MemAddressMode::kStrided, vm);
EmitR(funct7, rs2, rs1, enum_cast<uint32_t>(VectorWidth::k32), vd, 0x7);
}
void Riscv64Assembler::VLsseg5e64(VRegister vd, XRegister rs1, XRegister rs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k5, 0x0, MemAddressMode::kStrided, vm);
EmitR(funct7, rs2, rs1, enum_cast<uint32_t>(VectorWidth::k64), vd, 0x7);
}
void Riscv64Assembler::VLsseg6e8(VRegister vd, XRegister rs1, XRegister rs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k6, 0x0, MemAddressMode::kStrided, vm);
EmitR(funct7, rs2, rs1, enum_cast<uint32_t>(VectorWidth::k8), vd, 0x7);
}
void Riscv64Assembler::VLsseg6e16(VRegister vd, XRegister rs1, XRegister rs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k6, 0x0, MemAddressMode::kStrided, vm);
EmitR(funct7, rs2, rs1, enum_cast<uint32_t>(VectorWidth::k16), vd, 0x7);
}
void Riscv64Assembler::VLsseg6e32(VRegister vd, XRegister rs1, XRegister rs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k6, 0x0, MemAddressMode::kStrided, vm);
EmitR(funct7, rs2, rs1, enum_cast<uint32_t>(VectorWidth::k32), vd, 0x7);
}
void Riscv64Assembler::VLsseg6e64(VRegister vd, XRegister rs1, XRegister rs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k6, 0x0, MemAddressMode::kStrided, vm);
EmitR(funct7, rs2, rs1, enum_cast<uint32_t>(VectorWidth::k64), vd, 0x7);
}
void Riscv64Assembler::VLsseg7e8(VRegister vd, XRegister rs1, XRegister rs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k7, 0x0, MemAddressMode::kStrided, vm);
EmitR(funct7, rs2, rs1, enum_cast<uint32_t>(VectorWidth::k8), vd, 0x7);
}
void Riscv64Assembler::VLsseg7e16(VRegister vd, XRegister rs1, XRegister rs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k7, 0x0, MemAddressMode::kStrided, vm);
EmitR(funct7, rs2, rs1, enum_cast<uint32_t>(VectorWidth::k16), vd, 0x7);
}
void Riscv64Assembler::VLsseg7e32(VRegister vd, XRegister rs1, XRegister rs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k7, 0x0, MemAddressMode::kStrided, vm);
EmitR(funct7, rs2, rs1, enum_cast<uint32_t>(VectorWidth::k32), vd, 0x7);
}
void Riscv64Assembler::VLsseg7e64(VRegister vd, XRegister rs1, XRegister rs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k7, 0x0, MemAddressMode::kStrided, vm);
EmitR(funct7, rs2, rs1, enum_cast<uint32_t>(VectorWidth::k64), vd, 0x7);
}
void Riscv64Assembler::VLsseg8e8(VRegister vd, XRegister rs1, XRegister rs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k8, 0x0, MemAddressMode::kStrided, vm);
EmitR(funct7, rs2, rs1, enum_cast<uint32_t>(VectorWidth::k8), vd, 0x7);
}
void Riscv64Assembler::VLsseg8e16(VRegister vd, XRegister rs1, XRegister rs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k8, 0x0, MemAddressMode::kStrided, vm);
EmitR(funct7, rs2, rs1, enum_cast<uint32_t>(VectorWidth::k16), vd, 0x7);
}
void Riscv64Assembler::VLsseg8e32(VRegister vd, XRegister rs1, XRegister rs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k8, 0x0, MemAddressMode::kStrided, vm);
EmitR(funct7, rs2, rs1, enum_cast<uint32_t>(VectorWidth::k32), vd, 0x7);
}
void Riscv64Assembler::VLsseg8e64(VRegister vd, XRegister rs1, XRegister rs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k8, 0x0, MemAddressMode::kStrided, vm);
EmitR(funct7, rs2, rs1, enum_cast<uint32_t>(VectorWidth::k64), vd, 0x7);
}
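// Strided segment loads: rs2 supplies the byte stride between consecutive
// segments, and the masked form keeps the DCHECK that vd must not overlap
// the v0 mask register. Hypothetical usage, assuming `asm_` as above with
// the stride in T0:
//   asm_.VLsseg4e32(V4, A0, T0, VM::kV0_t);  // vlsseg4e32.v v4, (a0), t0, v0.t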
void Riscv64Assembler::VSsseg2e8(VRegister vs3, XRegister rs1, XRegister rs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k2, 0x0, MemAddressMode::kStrided, vm);
EmitR(funct7, rs2, rs1, enum_cast<uint32_t>(VectorWidth::k8), vs3, 0x27);
}
void Riscv64Assembler::VSsseg2e16(VRegister vs3, XRegister rs1, XRegister rs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k2, 0x0, MemAddressMode::kStrided, vm);
EmitR(funct7, rs2, rs1, enum_cast<uint32_t>(VectorWidth::k16), vs3, 0x27);
}
void Riscv64Assembler::VSsseg2e32(VRegister vs3, XRegister rs1, XRegister rs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k2, 0x0, MemAddressMode::kStrided, vm);
EmitR(funct7, rs2, rs1, enum_cast<uint32_t>(VectorWidth::k32), vs3, 0x27);
}
void Riscv64Assembler::VSsseg2e64(VRegister vs3, XRegister rs1, XRegister rs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k2, 0x0, MemAddressMode::kStrided, vm);
EmitR(funct7, rs2, rs1, enum_cast<uint32_t>(VectorWidth::k64), vs3, 0x27);
}
void Riscv64Assembler::VSsseg3e8(VRegister vs3, XRegister rs1, XRegister rs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k3, 0x0, MemAddressMode::kStrided, vm);
EmitR(funct7, rs2, rs1, enum_cast<uint32_t>(VectorWidth::k8), vs3, 0x27);
}
void Riscv64Assembler::VSsseg3e16(VRegister vs3, XRegister rs1, XRegister rs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k3, 0x0, MemAddressMode::kStrided, vm);
EmitR(funct7, rs2, rs1, enum_cast<uint32_t>(VectorWidth::k16), vs3, 0x27);
}
void Riscv64Assembler::VSsseg3e32(VRegister vs3, XRegister rs1, XRegister rs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k3, 0x0, MemAddressMode::kStrided, vm);
EmitR(funct7, rs2, rs1, enum_cast<uint32_t>(VectorWidth::k32), vs3, 0x27);
}
void Riscv64Assembler::VSsseg3e64(VRegister vs3, XRegister rs1, XRegister rs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k3, 0x0, MemAddressMode::kStrided, vm);
EmitR(funct7, rs2, rs1, enum_cast<uint32_t>(VectorWidth::k64), vs3, 0x27);
}
void Riscv64Assembler::VSsseg4e8(VRegister vs3, XRegister rs1, XRegister rs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k4, 0x0, MemAddressMode::kStrided, vm);
EmitR(funct7, rs2, rs1, enum_cast<uint32_t>(VectorWidth::k8), vs3, 0x27);
}
void Riscv64Assembler::VSsseg4e16(VRegister vs3, XRegister rs1, XRegister rs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k4, 0x0, MemAddressMode::kStrided, vm);
EmitR(funct7, rs2, rs1, enum_cast<uint32_t>(VectorWidth::k16), vs3, 0x27);
}
void Riscv64Assembler::VSsseg4e32(VRegister vs3, XRegister rs1, XRegister rs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k4, 0x0, MemAddressMode::kStrided, vm);
EmitR(funct7, rs2, rs1, enum_cast<uint32_t>(VectorWidth::k32), vs3, 0x27);
}
void Riscv64Assembler::VSsseg4e64(VRegister vs3, XRegister rs1, XRegister rs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k4, 0x0, MemAddressMode::kStrided, vm);
EmitR(funct7, rs2, rs1, enum_cast<uint32_t>(VectorWidth::k64), vs3, 0x27);
}
void Riscv64Assembler::VSsseg5e8(VRegister vs3, XRegister rs1, XRegister rs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k5, 0x0, MemAddressMode::kStrided, vm);
EmitR(funct7, rs2, rs1, enum_cast<uint32_t>(VectorWidth::k8), vs3, 0x27);
}
void Riscv64Assembler::VSsseg5e16(VRegister vs3, XRegister rs1, XRegister rs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k5, 0x0, MemAddressMode::kStrided, vm);
EmitR(funct7, rs2, rs1, enum_cast<uint32_t>(VectorWidth::k16), vs3, 0x27);
}
void Riscv64Assembler::VSsseg5e32(VRegister vs3, XRegister rs1, XRegister rs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k5, 0x0, MemAddressMode::kStrided, vm);
EmitR(funct7, rs2, rs1, enum_cast<uint32_t>(VectorWidth::k32), vs3, 0x27);
}
void Riscv64Assembler::VSsseg5e64(VRegister vs3, XRegister rs1, XRegister rs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k5, 0x0, MemAddressMode::kStrided, vm);
EmitR(funct7, rs2, rs1, enum_cast<uint32_t>(VectorWidth::k64), vs3, 0x27);
}
void Riscv64Assembler::VSsseg6e8(VRegister vs3, XRegister rs1, XRegister rs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k6, 0x0, MemAddressMode::kStrided, vm);
EmitR(funct7, rs2, rs1, enum_cast<uint32_t>(VectorWidth::k8), vs3, 0x27);
}
void Riscv64Assembler::VSsseg6e16(VRegister vs3, XRegister rs1, XRegister rs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k6, 0x0, MemAddressMode::kStrided, vm);
EmitR(funct7, rs2, rs1, enum_cast<uint32_t>(VectorWidth::k16), vs3, 0x27);
}
void Riscv64Assembler::VSsseg6e32(VRegister vs3, XRegister rs1, XRegister rs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k6, 0x0, MemAddressMode::kStrided, vm);
EmitR(funct7, rs2, rs1, enum_cast<uint32_t>(VectorWidth::k32), vs3, 0x27);
}
void Riscv64Assembler::VSsseg6e64(VRegister vs3, XRegister rs1, XRegister rs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k6, 0x0, MemAddressMode::kStrided, vm);
EmitR(funct7, rs2, rs1, enum_cast<uint32_t>(VectorWidth::k64), vs3, 0x27);
}
void Riscv64Assembler::VSsseg7e8(VRegister vs3, XRegister rs1, XRegister rs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k7, 0x0, MemAddressMode::kStrided, vm);
EmitR(funct7, rs2, rs1, enum_cast<uint32_t>(VectorWidth::k8), vs3, 0x27);
}
void Riscv64Assembler::VSsseg7e16(VRegister vs3, XRegister rs1, XRegister rs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k7, 0x0, MemAddressMode::kStrided, vm);
EmitR(funct7, rs2, rs1, enum_cast<uint32_t>(VectorWidth::k16), vs3, 0x27);
}
void Riscv64Assembler::VSsseg7e32(VRegister vs3, XRegister rs1, XRegister rs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k7, 0x0, MemAddressMode::kStrided, vm);
EmitR(funct7, rs2, rs1, enum_cast<uint32_t>(VectorWidth::k32), vs3, 0x27);
}
void Riscv64Assembler::VSsseg7e64(VRegister vs3, XRegister rs1, XRegister rs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k7, 0x0, MemAddressMode::kStrided, vm);
EmitR(funct7, rs2, rs1, enum_cast<uint32_t>(VectorWidth::k64), vs3, 0x27);
}
void Riscv64Assembler::VSsseg8e8(VRegister vs3, XRegister rs1, XRegister rs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k8, 0x0, MemAddressMode::kStrided, vm);
EmitR(funct7, rs2, rs1, enum_cast<uint32_t>(VectorWidth::k8), vs3, 0x27);
}
void Riscv64Assembler::VSsseg8e16(VRegister vs3, XRegister rs1, XRegister rs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k8, 0x0, MemAddressMode::kStrided, vm);
EmitR(funct7, rs2, rs1, enum_cast<uint32_t>(VectorWidth::k16), vs3, 0x27);
}
void Riscv64Assembler::VSsseg8e32(VRegister vs3, XRegister rs1, XRegister rs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k8, 0x0, MemAddressMode::kStrided, vm);
EmitR(funct7, rs2, rs1, enum_cast<uint32_t>(VectorWidth::k32), vs3, 0x27);
}
void Riscv64Assembler::VSsseg8e64(VRegister vs3, XRegister rs1, XRegister rs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k8, 0x0, MemAddressMode::kStrided, vm);
EmitR(funct7, rs2, rs1, enum_cast<uint32_t>(VectorWidth::k64), vs3, 0x27);
}
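// The strided segment stores mirror the loads but emit opcode 0x27 (the
// STORE-FP major opcode) and carry no overlap DCHECK: vs3 is only read, so a
// masked store may name any source register group, including one that
// overlaps v0.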
void Riscv64Assembler::VLuxseg2ei8(VRegister vd, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k2, 0x0, MemAddressMode::kIndexedUnordered, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VectorWidth::k8), vd, 0x7);
}
void Riscv64Assembler::VLuxseg2ei16(VRegister vd, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k2, 0x0, MemAddressMode::kIndexedUnordered, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VectorWidth::k16), vd, 0x7);
}
void Riscv64Assembler::VLuxseg2ei32(VRegister vd, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k2, 0x0, MemAddressMode::kIndexedUnordered, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VectorWidth::k32), vd, 0x7);
}
void Riscv64Assembler::VLuxseg2ei64(VRegister vd, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k2, 0x0, MemAddressMode::kIndexedUnordered, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VectorWidth::k64), vd, 0x7);
}
void Riscv64Assembler::VLuxseg3ei8(VRegister vd, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k3, 0x0, MemAddressMode::kIndexedUnordered, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VectorWidth::k8), vd, 0x7);
}
void Riscv64Assembler::VLuxseg3ei16(VRegister vd, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k3, 0x0, MemAddressMode::kIndexedUnordered, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VectorWidth::k16), vd, 0x7);
}
void Riscv64Assembler::VLuxseg3ei32(VRegister vd, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k3, 0x0, MemAddressMode::kIndexedUnordered, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VectorWidth::k32), vd, 0x7);
}
void Riscv64Assembler::VLuxseg3ei64(VRegister vd, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k3, 0x0, MemAddressMode::kIndexedUnordered, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VectorWidth::k64), vd, 0x7);
}
void Riscv64Assembler::VLuxseg4ei8(VRegister vd, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k4, 0x0, MemAddressMode::kIndexedUnordered, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VectorWidth::k8), vd, 0x7);
}
void Riscv64Assembler::VLuxseg4ei16(VRegister vd, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k4, 0x0, MemAddressMode::kIndexedUnordered, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VectorWidth::k16), vd, 0x7);
}
void Riscv64Assembler::VLuxseg4ei32(VRegister vd, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k4, 0x0, MemAddressMode::kIndexedUnordered, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VectorWidth::k32), vd, 0x7);
}
void Riscv64Assembler::VLuxseg4ei64(VRegister vd, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k4, 0x0, MemAddressMode::kIndexedUnordered, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VectorWidth::k64), vd, 0x7);
}
void Riscv64Assembler::VLuxseg5ei8(VRegister vd, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k5, 0x0, MemAddressMode::kIndexedUnordered, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VectorWidth::k8), vd, 0x7);
}
void Riscv64Assembler::VLuxseg5ei16(VRegister vd, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k5, 0x0, MemAddressMode::kIndexedUnordered, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VectorWidth::k16), vd, 0x7);
}
void Riscv64Assembler::VLuxseg5ei32(VRegister vd, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k5, 0x0, MemAddressMode::kIndexedUnordered, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VectorWidth::k32), vd, 0x7);
}
void Riscv64Assembler::VLuxseg5ei64(VRegister vd, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k5, 0x0, MemAddressMode::kIndexedUnordered, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VectorWidth::k64), vd, 0x7);
}
void Riscv64Assembler::VLuxseg6ei8(VRegister vd, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k6, 0x0, MemAddressMode::kIndexedUnordered, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VectorWidth::k8), vd, 0x7);
}
void Riscv64Assembler::VLuxseg6ei16(VRegister vd, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k6, 0x0, MemAddressMode::kIndexedUnordered, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VectorWidth::k16), vd, 0x7);
}
void Riscv64Assembler::VLuxseg6ei32(VRegister vd, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k6, 0x0, MemAddressMode::kIndexedUnordered, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VectorWidth::k32), vd, 0x7);
}
void Riscv64Assembler::VLuxseg6ei64(VRegister vd, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k6, 0x0, MemAddressMode::kIndexedUnordered, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VectorWidth::k64), vd, 0x7);
}
void Riscv64Assembler::VLuxseg7ei8(VRegister vd, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k7, 0x0, MemAddressMode::kIndexedUnordered, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VectorWidth::k8), vd, 0x7);
}
void Riscv64Assembler::VLuxseg7ei16(VRegister vd, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k7, 0x0, MemAddressMode::kIndexedUnordered, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VectorWidth::k16), vd, 0x7);
}
void Riscv64Assembler::VLuxseg7ei32(VRegister vd, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k7, 0x0, MemAddressMode::kIndexedUnordered, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VectorWidth::k32), vd, 0x7);
}
void Riscv64Assembler::VLuxseg7ei64(VRegister vd, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k7, 0x0, MemAddressMode::kIndexedUnordered, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VectorWidth::k64), vd, 0x7);
}
void Riscv64Assembler::VLuxseg8ei8(VRegister vd, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k8, 0x0, MemAddressMode::kIndexedUnordered, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VectorWidth::k8), vd, 0x7);
}
void Riscv64Assembler::VLuxseg8ei16(VRegister vd, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k8, 0x0, MemAddressMode::kIndexedUnordered, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VectorWidth::k16), vd, 0x7);
}
void Riscv64Assembler::VLuxseg8ei32(VRegister vd, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k8, 0x0, MemAddressMode::kIndexedUnordered, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VectorWidth::k32), vd, 0x7);
}
void Riscv64Assembler::VLuxseg8ei64(VRegister vd, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k8, 0x0, MemAddressMode::kIndexedUnordered, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VectorWidth::k64), vd, 0x7);
}
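// Indexed-unordered segment loads: vs2 occupies the rs2 slot as the index
// vector, and the width field encodes the *index* EEW (the "ei8".."ei64"
// suffix); the data element width comes from vtype's SEW, not from the
// instruction word. Hypothetical usage, assuming `asm_` as above:
//   asm_.VLuxseg2ei16(V2, A0, V8, VM::kUnmasked);  // vluxseg2ei16.v v2, (a0), v8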
void Riscv64Assembler::VSuxseg2ei8(VRegister vs3, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k2, 0x0, MemAddressMode::kIndexedUnordered, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VectorWidth::k8), vs3, 0x27);
}
void Riscv64Assembler::VSuxseg2ei16(VRegister vs3, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k2, 0x0, MemAddressMode::kIndexedUnordered, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VectorWidth::k16), vs3, 0x27);
}
void Riscv64Assembler::VSuxseg2ei32(VRegister vs3, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k2, 0x0, MemAddressMode::kIndexedUnordered, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VectorWidth::k32), vs3, 0x27);
}
void Riscv64Assembler::VSuxseg2ei64(VRegister vs3, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k2, 0x0, MemAddressMode::kIndexedUnordered, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VectorWidth::k64), vs3, 0x27);
}
void Riscv64Assembler::VSuxseg3ei8(VRegister vs3, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k3, 0x0, MemAddressMode::kIndexedUnordered, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VectorWidth::k8), vs3, 0x27);
}
void Riscv64Assembler::VSuxseg3ei16(VRegister vs3, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k3, 0x0, MemAddressMode::kIndexedUnordered, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VectorWidth::k16), vs3, 0x27);
}
void Riscv64Assembler::VSuxseg3ei32(VRegister vs3, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k3, 0x0, MemAddressMode::kIndexedUnordered, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VectorWidth::k32), vs3, 0x27);
}
void Riscv64Assembler::VSuxseg3ei64(VRegister vs3, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k3, 0x0, MemAddressMode::kIndexedUnordered, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VectorWidth::k64), vs3, 0x27);
}
void Riscv64Assembler::VSuxseg4ei8(VRegister vs3, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k4, 0x0, MemAddressMode::kIndexedUnordered, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VectorWidth::k8), vs3, 0x27);
}
void Riscv64Assembler::VSuxseg4ei16(VRegister vs3, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k4, 0x0, MemAddressMode::kIndexedUnordered, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VectorWidth::k16), vs3, 0x27);
}
void Riscv64Assembler::VSuxseg4ei32(VRegister vs3, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k4, 0x0, MemAddressMode::kIndexedUnordered, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VectorWidth::k32), vs3, 0x27);
}
void Riscv64Assembler::VSuxseg4ei64(VRegister vs3, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k4, 0x0, MemAddressMode::kIndexedUnordered, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VectorWidth::k64), vs3, 0x27);
}
void Riscv64Assembler::VSuxseg5ei8(VRegister vs3, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k5, 0x0, MemAddressMode::kIndexedUnordered, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VectorWidth::k8), vs3, 0x27);
}
void Riscv64Assembler::VSuxseg5ei16(VRegister vs3, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k5, 0x0, MemAddressMode::kIndexedUnordered, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VectorWidth::k16), vs3, 0x27);
}
void Riscv64Assembler::VSuxseg5ei32(VRegister vs3, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k5, 0x0, MemAddressMode::kIndexedUnordered, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VectorWidth::k32), vs3, 0x27);
}
void Riscv64Assembler::VSuxseg5ei64(VRegister vs3, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k5, 0x0, MemAddressMode::kIndexedUnordered, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VectorWidth::k64), vs3, 0x27);
}
void Riscv64Assembler::VSuxseg6ei8(VRegister vs3, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k6, 0x0, MemAddressMode::kIndexedUnordered, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VectorWidth::k8), vs3, 0x27);
}
void Riscv64Assembler::VSuxseg6ei16(VRegister vs3, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k6, 0x0, MemAddressMode::kIndexedUnordered, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VectorWidth::k16), vs3, 0x27);
}
void Riscv64Assembler::VSuxseg6ei32(VRegister vs3, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k6, 0x0, MemAddressMode::kIndexedUnordered, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VectorWidth::k32), vs3, 0x27);
}
void Riscv64Assembler::VSuxseg6ei64(VRegister vs3, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k6, 0x0, MemAddressMode::kIndexedUnordered, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VectorWidth::k64), vs3, 0x27);
}
void Riscv64Assembler::VSuxseg7ei8(VRegister vs3, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k7, 0x0, MemAddressMode::kIndexedUnordered, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VectorWidth::k8), vs3, 0x27);
}
void Riscv64Assembler::VSuxseg7ei16(VRegister vs3, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k7, 0x0, MemAddressMode::kIndexedUnordered, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VectorWidth::k16), vs3, 0x27);
}
void Riscv64Assembler::VSuxseg7ei32(VRegister vs3, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k7, 0x0, MemAddressMode::kIndexedUnordered, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VectorWidth::k32), vs3, 0x27);
}
void Riscv64Assembler::VSuxseg7ei64(VRegister vs3, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k7, 0x0, MemAddressMode::kIndexedUnordered, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VectorWidth::k64), vs3, 0x27);
}
void Riscv64Assembler::VSuxseg8ei8(VRegister vs3, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k8, 0x0, MemAddressMode::kIndexedUnordered, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VectorWidth::k8), vs3, 0x27);
}
void Riscv64Assembler::VSuxseg8ei16(VRegister vs3, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k8, 0x0, MemAddressMode::kIndexedUnordered, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VectorWidth::k16), vs3, 0x27);
}
void Riscv64Assembler::VSuxseg8ei32(VRegister vs3, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k8, 0x0, MemAddressMode::kIndexedUnordered, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VectorWidth::k32), vs3, 0x27);
}
void Riscv64Assembler::VSuxseg8ei64(VRegister vs3, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k8, 0x0, MemAddressMode::kIndexedUnordered, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VectorWidth::k64), vs3, 0x27);
}
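// As with the strided stores, the indexed-unordered segment stores drop the
// v0-overlap DCHECK (vs3 is a source) and differ from their load
// counterparts only in the 0x27 STORE-FP opcode.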
void Riscv64Assembler::VLoxseg2ei8(VRegister vd, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k2, 0x0, MemAddressMode::kIndexedOrdered, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VectorWidth::k8), vd, 0x7);
}
void Riscv64Assembler::VLoxseg2ei16(VRegister vd, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k2, 0x0, MemAddressMode::kIndexedOrdered, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VectorWidth::k16), vd, 0x7);
}
void Riscv64Assembler::VLoxseg2ei32(VRegister vd, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k2, 0x0, MemAddressMode::kIndexedOrdered, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VectorWidth::k32), vd, 0x7);
}
void Riscv64Assembler::VLoxseg2ei64(VRegister vd, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k2, 0x0, MemAddressMode::kIndexedOrdered, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VectorWidth::k64), vd, 0x7);
}
void Riscv64Assembler::VLoxseg3ei8(VRegister vd, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k3, 0x0, MemAddressMode::kIndexedOrdered, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VectorWidth::k8), vd, 0x7);
}
void Riscv64Assembler::VLoxseg3ei16(VRegister vd, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k3, 0x0, MemAddressMode::kIndexedOrdered, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VectorWidth::k16), vd, 0x7);
}
void Riscv64Assembler::VLoxseg3ei32(VRegister vd, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k3, 0x0, MemAddressMode::kIndexedOrdered, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VectorWidth::k32), vd, 0x7);
}
void Riscv64Assembler::VLoxseg3ei64(VRegister vd, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k3, 0x0, MemAddressMode::kIndexedOrdered, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VectorWidth::k64), vd, 0x7);
}
void Riscv64Assembler::VLoxseg4ei8(VRegister vd, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k4, 0x0, MemAddressMode::kIndexedOrdered, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VectorWidth::k8), vd, 0x7);
}
void Riscv64Assembler::VLoxseg4ei16(VRegister vd, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k4, 0x0, MemAddressMode::kIndexedOrdered, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VectorWidth::k16), vd, 0x7);
}
void Riscv64Assembler::VLoxseg4ei32(VRegister vd, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k4, 0x0, MemAddressMode::kIndexedOrdered, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VectorWidth::k32), vd, 0x7);
}
void Riscv64Assembler::VLoxseg4ei64(VRegister vd, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k4, 0x0, MemAddressMode::kIndexedOrdered, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VectorWidth::k64), vd, 0x7);
}
void Riscv64Assembler::VLoxseg5ei8(VRegister vd, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k5, 0x0, MemAddressMode::kIndexedOrdered, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VectorWidth::k8), vd, 0x7);
}
void Riscv64Assembler::VLoxseg5ei16(VRegister vd, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k5, 0x0, MemAddressMode::kIndexedOrdered, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VectorWidth::k16), vd, 0x7);
}
void Riscv64Assembler::VLoxseg5ei32(VRegister vd, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k5, 0x0, MemAddressMode::kIndexedOrdered, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VectorWidth::k32), vd, 0x7);
}
void Riscv64Assembler::VLoxseg5ei64(VRegister vd, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k5, 0x0, MemAddressMode::kIndexedOrdered, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VectorWidth::k64), vd, 0x7);
}
void Riscv64Assembler::VLoxseg6ei8(VRegister vd, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k6, 0x0, MemAddressMode::kIndexedOrdered, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VectorWidth::k8), vd, 0x7);
}
void Riscv64Assembler::VLoxseg6ei16(VRegister vd, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k6, 0x0, MemAddressMode::kIndexedOrdered, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VectorWidth::k16), vd, 0x7);
}
void Riscv64Assembler::VLoxseg6ei32(VRegister vd, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k6, 0x0, MemAddressMode::kIndexedOrdered, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VectorWidth::k32), vd, 0x7);
}
void Riscv64Assembler::VLoxseg6ei64(VRegister vd, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k6, 0x0, MemAddressMode::kIndexedOrdered, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VectorWidth::k64), vd, 0x7);
}
void Riscv64Assembler::VLoxseg7ei8(VRegister vd, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k7, 0x0, MemAddressMode::kIndexedOrdered, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VectorWidth::k8), vd, 0x7);
}
void Riscv64Assembler::VLoxseg7ei16(VRegister vd, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k7, 0x0, MemAddressMode::kIndexedOrdered, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VectorWidth::k16), vd, 0x7);
}
void Riscv64Assembler::VLoxseg7ei32(VRegister vd, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k7, 0x0, MemAddressMode::kIndexedOrdered, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VectorWidth::k32), vd, 0x7);
}
void Riscv64Assembler::VLoxseg7ei64(VRegister vd, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k7, 0x0, MemAddressMode::kIndexedOrdered, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VectorWidth::k64), vd, 0x7);
}
void Riscv64Assembler::VLoxseg8ei8(VRegister vd, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k8, 0x0, MemAddressMode::kIndexedOrdered, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VectorWidth::k8), vd, 0x7);
}
void Riscv64Assembler::VLoxseg8ei16(VRegister vd, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k8, 0x0, MemAddressMode::kIndexedOrdered, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VectorWidth::k16), vd, 0x7);
}
void Riscv64Assembler::VLoxseg8ei32(VRegister vd, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k8, 0x0, MemAddressMode::kIndexedOrdered, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VectorWidth::k32), vd, 0x7);
}
void Riscv64Assembler::VLoxseg8ei64(VRegister vd, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k8, 0x0, MemAddressMode::kIndexedOrdered, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VectorWidth::k64), vd, 0x7);
}
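// The indexed-ordered loads differ from the unordered group only in
// MemAddressMode::kIndexedOrdered (mop = 0b11 rather than 0b01), which
// requires the hardware to perform the indexed accesses in element order.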
void Riscv64Assembler::VSoxseg2ei8(VRegister vs3, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k2, 0x0, MemAddressMode::kIndexedOrdered, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VectorWidth::k8), vs3, 0x27);
}
void Riscv64Assembler::VSoxseg2ei16(VRegister vs3, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k2, 0x0, MemAddressMode::kIndexedOrdered, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VectorWidth::k16), vs3, 0x27);
}
void Riscv64Assembler::VSoxseg2ei32(VRegister vs3, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k2, 0x0, MemAddressMode::kIndexedOrdered, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VectorWidth::k32), vs3, 0x27);
}
void Riscv64Assembler::VSoxseg2ei64(VRegister vs3, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k2, 0x0, MemAddressMode::kIndexedOrdered, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VectorWidth::k64), vs3, 0x27);
}
void Riscv64Assembler::VSoxseg3ei8(VRegister vs3, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k3, 0x0, MemAddressMode::kIndexedOrdered, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VectorWidth::k8), vs3, 0x27);
}
void Riscv64Assembler::VSoxseg3ei16(VRegister vs3, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k3, 0x0, MemAddressMode::kIndexedOrdered, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VectorWidth::k16), vs3, 0x27);
}
void Riscv64Assembler::VSoxseg3ei32(VRegister vs3, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k3, 0x0, MemAddressMode::kIndexedOrdered, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VectorWidth::k32), vs3, 0x27);
}
void Riscv64Assembler::VSoxseg3ei64(VRegister vs3, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k3, 0x0, MemAddressMode::kIndexedOrdered, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VectorWidth::k64), vs3, 0x27);
}
void Riscv64Assembler::VSoxseg4ei8(VRegister vs3, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k4, 0x0, MemAddressMode::kIndexedOrdered, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VectorWidth::k8), vs3, 0x27);
}
void Riscv64Assembler::VSoxseg4ei16(VRegister vs3, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k4, 0x0, MemAddressMode::kIndexedOrdered, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VectorWidth::k16), vs3, 0x27);
}
void Riscv64Assembler::VSoxseg4ei32(VRegister vs3, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k4, 0x0, MemAddressMode::kIndexedOrdered, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VectorWidth::k32), vs3, 0x27);
}
void Riscv64Assembler::VSoxseg4ei64(VRegister vs3, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k4, 0x0, MemAddressMode::kIndexedOrdered, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VectorWidth::k64), vs3, 0x27);
}
void Riscv64Assembler::VSoxseg5ei8(VRegister vs3, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k5, 0x0, MemAddressMode::kIndexedOrdered, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VectorWidth::k8), vs3, 0x27);
}
void Riscv64Assembler::VSoxseg5ei16(VRegister vs3, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k5, 0x0, MemAddressMode::kIndexedOrdered, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VectorWidth::k16), vs3, 0x27);
}
void Riscv64Assembler::VSoxseg5ei32(VRegister vs3, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k5, 0x0, MemAddressMode::kIndexedOrdered, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VectorWidth::k32), vs3, 0x27);
}
void Riscv64Assembler::VSoxseg5ei64(VRegister vs3, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k5, 0x0, MemAddressMode::kIndexedOrdered, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VectorWidth::k64), vs3, 0x27);
}
void Riscv64Assembler::VSoxseg6ei8(VRegister vs3, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k6, 0x0, MemAddressMode::kIndexedOrdered, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VectorWidth::k8), vs3, 0x27);
}
void Riscv64Assembler::VSoxseg6ei16(VRegister vs3, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k6, 0x0, MemAddressMode::kIndexedOrdered, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VectorWidth::k16), vs3, 0x27);
}
void Riscv64Assembler::VSoxseg6ei32(VRegister vs3, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k6, 0x0, MemAddressMode::kIndexedOrdered, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VectorWidth::k32), vs3, 0x27);
}
void Riscv64Assembler::VSoxseg6ei64(VRegister vs3, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k6, 0x0, MemAddressMode::kIndexedOrdered, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VectorWidth::k64), vs3, 0x27);
}
void Riscv64Assembler::VSoxseg7ei8(VRegister vs3, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k7, 0x0, MemAddressMode::kIndexedOrdered, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VectorWidth::k8), vs3, 0x27);
}
void Riscv64Assembler::VSoxseg7ei16(VRegister vs3, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k7, 0x0, MemAddressMode::kIndexedOrdered, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VectorWidth::k16), vs3, 0x27);
}
void Riscv64Assembler::VSoxseg7ei32(VRegister vs3, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k7, 0x0, MemAddressMode::kIndexedOrdered, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VectorWidth::k32), vs3, 0x27);
}
void Riscv64Assembler::VSoxseg7ei64(VRegister vs3, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k7, 0x0, MemAddressMode::kIndexedOrdered, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VectorWidth::k64), vs3, 0x27);
}
void Riscv64Assembler::VSoxseg8ei8(VRegister vs3, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k8, 0x0, MemAddressMode::kIndexedOrdered, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VectorWidth::k8), vs3, 0x27);
}
void Riscv64Assembler::VSoxseg8ei16(VRegister vs3, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k8, 0x0, MemAddressMode::kIndexedOrdered, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VectorWidth::k16), vs3, 0x27);
}
void Riscv64Assembler::VSoxseg8ei32(VRegister vs3, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k8, 0x0, MemAddressMode::kIndexedOrdered, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VectorWidth::k32), vs3, 0x27);
}
void Riscv64Assembler::VSoxseg8ei64(VRegister vs3, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k8, 0x0, MemAddressMode::kIndexedOrdered, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VectorWidth::k64), vs3, 0x27);
}
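
All of the indexed-ordered segment stores above funnel through EncodeRVVMemF7, which packs the segment count, element-width flag, addressing mode and mask bit into the funct7 field (instruction bits 31..25). A minimal re-derivation for illustration only — assuming the RVV 1.0 field layout, that Nf::k4 encodes 3 (fields minus one), and that MemAddressMode::kIndexedOrdered encodes 0b11 as in the spec; this is not the helper from this file:

    // Hypothetical sketch of the packing.
    // Within funct7: nf in bits 6..4, mew in bit 3, mop in bits 2..1, vm in bit 0.
    constexpr uint32_t EncodeRVVMemF7Sketch(uint32_t nf, uint32_t mew,
                                            uint32_t mop, uint32_t vm) {
      return (nf << 4) | (mew << 3) | (mop << 1) | vm;
    }

Under those assumptions an unmasked VSoxseg4ei16 gets funct7 = (3 << 4) | (0b11 << 1) | 1 = 0b0110111.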
void Riscv64Assembler::VL1re8(VRegister vd, XRegister rs1) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k1, 0x0, MemAddressMode::kUnitStride, VM::kUnmasked);
EmitR(funct7, 0b01000u, rs1, enum_cast<uint32_t>(VectorWidth::k8), vd, 0x7);
}
void Riscv64Assembler::VL1re16(VRegister vd, XRegister rs1) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k1, 0x0, MemAddressMode::kUnitStride, VM::kUnmasked);
EmitR(funct7, 0b01000u, rs1, enum_cast<uint32_t>(VectorWidth::k16), vd, 0x7);
}
void Riscv64Assembler::VL1re32(VRegister vd, XRegister rs1) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k1, 0x0, MemAddressMode::kUnitStride, VM::kUnmasked);
EmitR(funct7, 0b01000u, rs1, enum_cast<uint32_t>(VectorWidth::k32), vd, 0x7);
}
void Riscv64Assembler::VL1re64(VRegister vd, XRegister rs1) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k1, 0x0, MemAddressMode::kUnitStride, VM::kUnmasked);
EmitR(funct7, 0b01000u, rs1, enum_cast<uint32_t>(VectorWidth::k64), vd, 0x7);
}
void Riscv64Assembler::VL2re8(VRegister vd, XRegister rs1) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_EQ((enum_cast<uint32_t>(vd) % 2), 0U);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k2, 0x0, MemAddressMode::kUnitStride, VM::kUnmasked);
EmitR(funct7, 0b01000u, rs1, enum_cast<uint32_t>(VectorWidth::k8), vd, 0x7);
}
void Riscv64Assembler::VL2re16(VRegister vd, XRegister rs1) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_EQ((enum_cast<uint32_t>(vd) % 2), 0U);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k2, 0x0, MemAddressMode::kUnitStride, VM::kUnmasked);
EmitR(funct7, 0b01000u, rs1, enum_cast<uint32_t>(VectorWidth::k16), vd, 0x7);
}
void Riscv64Assembler::VL2re32(VRegister vd, XRegister rs1) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_EQ((enum_cast<uint32_t>(vd) % 2), 0U);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k2, 0x0, MemAddressMode::kUnitStride, VM::kUnmasked);
EmitR(funct7, 0b01000u, rs1, enum_cast<uint32_t>(VectorWidth::k32), vd, 0x7);
}
void Riscv64Assembler::VL2re64(VRegister vd, XRegister rs1) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_EQ((enum_cast<uint32_t>(vd) % 2), 0U);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k2, 0x0, MemAddressMode::kUnitStride, VM::kUnmasked);
EmitR(funct7, 0b01000u, rs1, enum_cast<uint32_t>(VectorWidth::k64), vd, 0x7);
}
void Riscv64Assembler::VL4re8(VRegister vd, XRegister rs1) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_EQ((enum_cast<uint32_t>(vd) % 4), 0U);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k4, 0x0, MemAddressMode::kUnitStride, VM::kUnmasked);
EmitR(funct7, 0b01000u, rs1, enum_cast<uint32_t>(VectorWidth::k8), vd, 0x7);
}
void Riscv64Assembler::VL4re16(VRegister vd, XRegister rs1) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_EQ((enum_cast<uint32_t>(vd) % 4), 0U);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k4, 0x0, MemAddressMode::kUnitStride, VM::kUnmasked);
EmitR(funct7, 0b01000u, rs1, enum_cast<uint32_t>(VectorWidth::k16), vd, 0x7);
}
void Riscv64Assembler::VL4re32(VRegister vd, XRegister rs1) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_EQ((enum_cast<uint32_t>(vd) % 4), 0U);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k4, 0x0, MemAddressMode::kUnitStride, VM::kUnmasked);
EmitR(funct7, 0b01000u, rs1, enum_cast<uint32_t>(VectorWidth::k32), vd, 0x7);
}
void Riscv64Assembler::VL4re64(VRegister vd, XRegister rs1) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_EQ((enum_cast<uint32_t>(vd) % 4), 0U);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k4, 0x0, MemAddressMode::kUnitStride, VM::kUnmasked);
EmitR(funct7, 0b01000u, rs1, enum_cast<uint32_t>(VectorWidth::k64), vd, 0x7);
}
void Riscv64Assembler::VL8re8(VRegister vd, XRegister rs1) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_EQ((enum_cast<uint32_t>(vd) % 8), 0U);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k8, 0x0, MemAddressMode::kUnitStride, VM::kUnmasked);
EmitR(funct7, 0b01000u, rs1, enum_cast<uint32_t>(VectorWidth::k8), vd, 0x7);
}
void Riscv64Assembler::VL8re16(VRegister vd, XRegister rs1) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_EQ((enum_cast<uint32_t>(vd) % 8), 0U);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k8, 0x0, MemAddressMode::kUnitStride, VM::kUnmasked);
EmitR(funct7, 0b01000u, rs1, enum_cast<uint32_t>(VectorWidth::k16), vd, 0x7);
}
void Riscv64Assembler::VL8re32(VRegister vd, XRegister rs1) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_EQ((enum_cast<uint32_t>(vd) % 8), 0U);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k8, 0x0, MemAddressMode::kUnitStride, VM::kUnmasked);
EmitR(funct7, 0b01000u, rs1, enum_cast<uint32_t>(VectorWidth::k32), vd, 0x7);
}
void Riscv64Assembler::VL8re64(VRegister vd, XRegister rs1) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
DCHECK_EQ((enum_cast<uint32_t>(vd) % 8), 0U);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k8, 0x0, MemAddressMode::kUnitStride, VM::kUnmasked);
EmitR(funct7, 0b01000u, rs1, enum_cast<uint32_t>(VectorWidth::k64), vd, 0x7);
@@ -2910,21 +3416,25 @@ void Riscv64Assembler::VL4r(VRegister vd, XRegister rs1) { VL4re8(vd, rs1); }
void Riscv64Assembler::VL8r(VRegister vd, XRegister rs1) { VL8re8(vd, rs1); }
void Riscv64Assembler::VS1r(VRegister vs3, XRegister rs1) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k1, 0x0, MemAddressMode::kUnitStride, VM::kUnmasked);
EmitR(funct7, 0b01000u, rs1, enum_cast<uint32_t>(VectorWidth::kWholeR), vs3, 0x27);
}
void Riscv64Assembler::VS2r(VRegister vs3, XRegister rs1) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k2, 0x0, MemAddressMode::kUnitStride, VM::kUnmasked);
EmitR(funct7, 0b01000u, rs1, enum_cast<uint32_t>(VectorWidth::kWholeR), vs3, 0x27);
}
void Riscv64Assembler::VS4r(VRegister vs3, XRegister rs1) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k4, 0x0, MemAddressMode::kUnitStride, VM::kUnmasked);
EmitR(funct7, 0b01000u, rs1, enum_cast<uint32_t>(VectorWidth::kWholeR), vs3, 0x27);
}
void Riscv64Assembler::VS8r(VRegister vs3, XRegister rs1) {
+ AssertExtensionsEnabled(Riscv64Extension::kLoadStore, Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVMemF7(Nf::k8, 0x0, MemAddressMode::kUnitStride, VM::kUnmasked);
EmitR(funct7, 0b01000u, rs1, enum_cast<uint32_t>(VectorWidth::kWholeR), vs3, 0x27);
}
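
The VL<n>re<w> and VS<n>r forms are whole-register moves: they transfer n registers regardless of vl and vtype, which is why VL2/VL4/VL8 DCHECK that the register index is a multiple of the group size (the stores carry the same architectural constraint even though no DCHECK is emitted for them here). A hedged usage sketch; the `__` assembler macro and the SP base register are illustrative, not taken from this diff:

    // Spill and reload a two-register group. V2 satisfies "% 2 == 0";
    // passing V3 to VL2re8 would trip the DCHECK.
    __ VS2r(V2, SP);
    __ VL2re8(V2, SP);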
@@ -2934,42 +3444,49 @@ void Riscv64Assembler::VS8r(VRegister vs3, XRegister rs1) {
/////////////////////////////// RVV Arithmetic Instructions START ////////////////////////////
void Riscv64Assembler::VAdd_vv(VRegister vd, VRegister vs2, VRegister vs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b000000, vm);
EmitR(funct7, vs2, vs1, enum_cast<uint32_t>(VAIEncoding::kOPIVV), vd, 0x57);
}
void Riscv64Assembler::VAdd_vx(VRegister vd, VRegister vs2, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b000000, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VAIEncoding::kOPIVX), vd, 0x57);
}
void Riscv64Assembler::VAdd_vi(VRegister vd, VRegister vs2, int32_t imm5, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b000000, vm);
EmitR(funct7, vs2, EncodeInt5(imm5), enum_cast<uint32_t>(VAIEncoding::kOPIVI), vd, 0x57);
}
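
From here on, the integer arithmetic emitters differ only in the 6-bit funct6 and the operand-kind field (OPIVV/OPIVX/OPIVI); EncodeRVVF7 just glues funct6 to the mask bit. A sketch under the assumption that the helper follows the spec layout (funct6 in bits 31..26, vm in bit 25); again illustrative, not the helper in this file:

    // Hypothetical equivalent of EncodeRVVF7.
    constexpr uint32_t EncodeRVVF7Sketch(uint32_t funct6, uint32_t vm_bit) {
      return (funct6 << 1) | vm_bit;  // 7-bit value placed at bits 31..25
    }

So an unmasked VAdd_vv emits funct7 = (0b000000 << 1) | 1 = 0b0000001, and the masked (VM::kV0_t) form clears the low bit.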
void Riscv64Assembler::VSub_vv(VRegister vd, VRegister vs2, VRegister vs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b000010, vm);
EmitR(funct7, vs2, vs1, enum_cast<uint32_t>(VAIEncoding::kOPIVV), vd, 0x57);
}
void Riscv64Assembler::VSub_vx(VRegister vd, VRegister vs2, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b000010, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VAIEncoding::kOPIVX), vd, 0x57);
}
void Riscv64Assembler::VRsub_vx(VRegister vd, VRegister vs2, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b000011, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VAIEncoding::kOPIVX), vd, 0x57);
}
void Riscv64Assembler::VRsub_vi(VRegister vd, VRegister vs2, int32_t imm5, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b000011, vm);
EmitR(funct7, vs2, EncodeInt5(imm5), enum_cast<uint32_t>(VAIEncoding::kOPIVI), vd, 0x57);
@@ -2978,101 +3495,118 @@ void Riscv64Assembler::VRsub_vi(VRegister vd, VRegister vs2, int32_t imm5, VM vm
void Riscv64Assembler::VNeg_v(VRegister vd, VRegister vs2) { VRsub_vx(vd, vs2, Zero); }
void Riscv64Assembler::VMinu_vv(VRegister vd, VRegister vs2, VRegister vs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b000100, vm);
EmitR(funct7, vs2, vs1, enum_cast<uint32_t>(VAIEncoding::kOPIVV), vd, 0x57);
}
void Riscv64Assembler::VMinu_vx(VRegister vd, VRegister vs2, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b000100, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VAIEncoding::kOPIVX), vd, 0x57);
}
void Riscv64Assembler::VMin_vv(VRegister vd, VRegister vs2, VRegister vs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b000101, vm);
EmitR(funct7, vs2, vs1, enum_cast<uint32_t>(VAIEncoding::kOPIVV), vd, 0x57);
}
void Riscv64Assembler::VMin_vx(VRegister vd, VRegister vs2, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b000101, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VAIEncoding::kOPIVX), vd, 0x57);
}
void Riscv64Assembler::VMaxu_vv(VRegister vd, VRegister vs2, VRegister vs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b000110, vm);
EmitR(funct7, vs2, vs1, enum_cast<uint32_t>(VAIEncoding::kOPIVV), vd, 0x57);
}
void Riscv64Assembler::VMaxu_vx(VRegister vd, VRegister vs2, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b000110, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VAIEncoding::kOPIVX), vd, 0x57);
}
void Riscv64Assembler::VMax_vv(VRegister vd, VRegister vs2, VRegister vs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b000111, vm);
EmitR(funct7, vs2, vs1, enum_cast<uint32_t>(VAIEncoding::kOPIVV), vd, 0x57);
}
void Riscv64Assembler::VMax_vx(VRegister vd, VRegister vs2, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b000111, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VAIEncoding::kOPIVX), vd, 0x57);
}
void Riscv64Assembler::VAnd_vv(VRegister vd, VRegister vs2, VRegister vs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b001001, vm);
EmitR(funct7, vs2, vs1, enum_cast<uint32_t>(VAIEncoding::kOPIVV), vd, 0x57);
}
void Riscv64Assembler::VAnd_vx(VRegister vd, VRegister vs2, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b001001, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VAIEncoding::kOPIVX), vd, 0x57);
}
void Riscv64Assembler::VAnd_vi(VRegister vd, VRegister vs2, int32_t imm5, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b001001, vm);
EmitR(funct7, vs2, EncodeInt5(imm5), enum_cast<uint32_t>(VAIEncoding::kOPIVI), vd, 0x57);
}
void Riscv64Assembler::VOr_vv(VRegister vd, VRegister vs2, VRegister vs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b001010, vm);
EmitR(funct7, vs2, vs1, enum_cast<uint32_t>(VAIEncoding::kOPIVV), vd, 0x57);
}
void Riscv64Assembler::VOr_vx(VRegister vd, VRegister vs2, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b001010, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VAIEncoding::kOPIVX), vd, 0x57);
}
void Riscv64Assembler::VOr_vi(VRegister vd, VRegister vs2, int32_t imm5, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b001010, vm);
EmitR(funct7, vs2, EncodeInt5(imm5), enum_cast<uint32_t>(VAIEncoding::kOPIVI), vd, 0x57);
}
void Riscv64Assembler::VXor_vv(VRegister vd, VRegister vs2, VRegister vs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b001011, vm);
EmitR(funct7, vs2, vs1, enum_cast<uint32_t>(VAIEncoding::kOPIVV), vd, 0x57);
}
void Riscv64Assembler::VXor_vx(VRegister vd, VRegister vs2, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b001011, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VAIEncoding::kOPIVX), vd, 0x57);
}
void Riscv64Assembler::VXor_vi(VRegister vd, VRegister vs2, int32_t imm5, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b001011, vm);
EmitR(funct7, vs2, EncodeInt5(imm5), enum_cast<uint32_t>(VAIEncoding::kOPIVI), vd, 0x57);
@@ -3081,6 +3615,7 @@ void Riscv64Assembler::VXor_vi(VRegister vd, VRegister vs2, int32_t imm5, VM vm)
void Riscv64Assembler::VNot_v(VRegister vd, VRegister vs2, VM vm) { VXor_vi(vd, vs2, -1, vm); }
void Riscv64Assembler::VRgather_vv(VRegister vd, VRegister vs2, VRegister vs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
DCHECK(vd != vs1);
DCHECK(vd != vs2);
@@ -3089,6 +3624,7 @@ void Riscv64Assembler::VRgather_vv(VRegister vd, VRegister vs2, VRegister vs1, V
}
void Riscv64Assembler::VRgather_vx(VRegister vd, VRegister vs2, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
DCHECK(vd != vs2);
const uint32_t funct7 = EncodeRVVF7(0b001100, vm);
@@ -3096,6 +3632,7 @@ void Riscv64Assembler::VRgather_vx(VRegister vd, VRegister vs2, XRegister rs1, V
}
void Riscv64Assembler::VRgather_vi(VRegister vd, VRegister vs2, uint32_t uimm5, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
DCHECK(vd != vs2);
const uint32_t funct7 = EncodeRVVF7(0b001100, vm);
@@ -3103,6 +3640,7 @@ void Riscv64Assembler::VRgather_vi(VRegister vd, VRegister vs2, uint32_t uimm5,
}
void Riscv64Assembler::VSlideup_vx(VRegister vd, VRegister vs2, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
DCHECK(vd != vs2);
const uint32_t funct7 = EncodeRVVF7(0b001110, vm);
@@ -3110,6 +3648,7 @@ void Riscv64Assembler::VSlideup_vx(VRegister vd, VRegister vs2, XRegister rs1, V
}
void Riscv64Assembler::VSlideup_vi(VRegister vd, VRegister vs2, uint32_t uimm5, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
DCHECK(vd != vs2);
const uint32_t funct7 = EncodeRVVF7(0b001110, vm);
@@ -3117,6 +3656,7 @@ void Riscv64Assembler::VSlideup_vi(VRegister vd, VRegister vs2, uint32_t uimm5,
}
void Riscv64Assembler::VRgatherei16_vv(VRegister vd, VRegister vs2, VRegister vs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
DCHECK(vd != vs1);
DCHECK(vd != vs2);
@@ -3125,6 +3665,7 @@ void Riscv64Assembler::VRgatherei16_vv(VRegister vd, VRegister vs2, VRegister vs
}
void Riscv64Assembler::VSlidedown_vx(VRegister vd, VRegister vs2, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
DCHECK(vd != vs2);
const uint32_t funct7 = EncodeRVVF7(0b001111, vm);
@@ -3132,183 +3673,216 @@ void Riscv64Assembler::VSlidedown_vx(VRegister vd, VRegister vs2, XRegister rs1,
}
void Riscv64Assembler::VSlidedown_vi(VRegister vd, VRegister vs2, uint32_t uimm5, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b001111, vm);
EmitR(funct7, vs2, uimm5, enum_cast<uint32_t>(VAIEncoding::kOPIVI), vd, 0x57);
}
void Riscv64Assembler::VAdc_vvm(VRegister vd, VRegister vs2, VRegister vs1) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK(vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b010000, VM::kV0_t);
EmitR(funct7, vs2, vs1, enum_cast<uint32_t>(VAIEncoding::kOPIVV), vd, 0x57);
}
void Riscv64Assembler::VAdc_vxm(VRegister vd, VRegister vs2, XRegister rs1) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK(vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b010000, VM::kV0_t);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VAIEncoding::kOPIVX), vd, 0x57);
}
void Riscv64Assembler::VAdc_vim(VRegister vd, VRegister vs2, int32_t imm5) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK(vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b010000, VM::kV0_t);
EmitR(funct7, vs2, EncodeInt5(imm5), enum_cast<uint32_t>(VAIEncoding::kOPIVI), vd, 0x57);
}
void Riscv64Assembler::VMadc_vvm(VRegister vd, VRegister vs2, VRegister vs1) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVF7(0b010001, VM::kV0_t);
EmitR(funct7, vs2, vs1, enum_cast<uint32_t>(VAIEncoding::kOPIVV), vd, 0x57);
}
void Riscv64Assembler::VMadc_vxm(VRegister vd, VRegister vs2, XRegister rs1) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVF7(0b010001, VM::kV0_t);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VAIEncoding::kOPIVX), vd, 0x57);
}
void Riscv64Assembler::VMadc_vim(VRegister vd, VRegister vs2, int32_t imm5) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVF7(0b010001, VM::kV0_t);
EmitR(funct7, vs2, EncodeInt5(imm5), enum_cast<uint32_t>(VAIEncoding::kOPIVI), vd, 0x57);
}
void Riscv64Assembler::VMadc_vv(VRegister vd, VRegister vs2, VRegister vs1) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVF7(0b010001, VM::kUnmasked);
EmitR(funct7, vs2, vs1, enum_cast<uint32_t>(VAIEncoding::kOPIVV), vd, 0x57);
}
void Riscv64Assembler::VMadc_vx(VRegister vd, VRegister vs2, XRegister rs1) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVF7(0b010001, VM::kUnmasked);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VAIEncoding::kOPIVX), vd, 0x57);
}
void Riscv64Assembler::VMadc_vi(VRegister vd, VRegister vs2, int32_t imm5) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVF7(0b010001, VM::kUnmasked);
EmitR(funct7, vs2, EncodeInt5(imm5), enum_cast<uint32_t>(VAIEncoding::kOPIVI), vd, 0x57);
}
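
VAdc/VMadc split wide addition in two: the VAdc_v*m forms add with the carry-in taken from v0 (hence the DCHECK(vd != V0) on them), while the VMadc_v* forms compute the carry-out as a mask. A multiword-add sketch, with register choices purely illustrative:

    __ VMadc_vv(V0, V4, V6);            // v0 = carry-out of the low limbs (no carry-in)
    __ VAdd_vv(V8, V4, V6, VM::kUnmasked);  // low limbs of the result
    __ VAdc_vvm(V9, V5, V7);            // high limbs + carry-in from v0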
void Riscv64Assembler::VSbc_vvm(VRegister vd, VRegister vs2, VRegister vs1) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK(vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b010010, VM::kV0_t);
EmitR(funct7, vs2, vs1, enum_cast<uint32_t>(VAIEncoding::kOPIVV), vd, 0x57);
}
void Riscv64Assembler::VSbc_vxm(VRegister vd, VRegister vs2, XRegister rs1) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK(vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b010010, VM::kV0_t);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VAIEncoding::kOPIVX), vd, 0x57);
}
void Riscv64Assembler::VMsbc_vvm(VRegister vd, VRegister vs2, VRegister vs1) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVF7(0b010011, VM::kV0_t);
EmitR(funct7, vs2, vs1, enum_cast<uint32_t>(VAIEncoding::kOPIVV), vd, 0x57);
}
void Riscv64Assembler::VMsbc_vxm(VRegister vd, VRegister vs2, XRegister rs1) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVF7(0b010011, VM::kV0_t);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VAIEncoding::kOPIVX), vd, 0x57);
}
void Riscv64Assembler::VMsbc_vv(VRegister vd, VRegister vs2, VRegister vs1) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVF7(0b010011, VM::kUnmasked);
EmitR(funct7, vs2, vs1, enum_cast<uint32_t>(VAIEncoding::kOPIVV), vd, 0x57);
}
void Riscv64Assembler::VMsbc_vx(VRegister vd, VRegister vs2, XRegister rs1) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVF7(0b010011, VM::kUnmasked);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VAIEncoding::kOPIVX), vd, 0x57);
}
void Riscv64Assembler::VMerge_vvm(VRegister vd, VRegister vs2, VRegister vs1) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK(vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b010111, VM::kV0_t);
EmitR(funct7, vs2, vs1, enum_cast<uint32_t>(VAIEncoding::kOPIVV), vd, 0x57);
}
void Riscv64Assembler::VMerge_vxm(VRegister vd, VRegister vs2, XRegister rs1) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK(vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b010111, VM::kV0_t);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VAIEncoding::kOPIVX), vd, 0x57);
}
void Riscv64Assembler::VMerge_vim(VRegister vd, VRegister vs2, int32_t imm5) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK(vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b010111, VM::kV0_t);
EmitR(funct7, vs2, EncodeInt5(imm5), enum_cast<uint32_t>(VAIEncoding::kOPIVI), vd, 0x57);
}
void Riscv64Assembler::VMv_vv(VRegister vd, VRegister vs1) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVF7(0b010111, VM::kUnmasked);
EmitR(funct7, V0, vs1, enum_cast<uint32_t>(VAIEncoding::kOPIVV), vd, 0x57);
}
void Riscv64Assembler::VMv_vx(VRegister vd, XRegister rs1) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVF7(0b010111, VM::kUnmasked);
EmitR(funct7, V0, rs1, enum_cast<uint32_t>(VAIEncoding::kOPIVX), vd, 0x57);
}
void Riscv64Assembler::VMv_vi(VRegister vd, int32_t imm5) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVF7(0b010111, VM::kUnmasked);
EmitR(funct7, V0, EncodeInt5(imm5), enum_cast<uint32_t>(VAIEncoding::kOPIVI), vd, 0x57);
}
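
Note the encoding trick in the three VMv_v* emitters above: vmv.v.* is the unmasked form of vmerge, so the vs2 field is hard-wired to V0 and ignored by hardware, and these overloads deliberately take no VM argument. Usage sketch:

    __ VMv_vi(V4, -1);   // splat: v4[i] = -1
    __ VMv_vv(V8, V4);   // copy:  v8 = v4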
void Riscv64Assembler::VMseq_vv(VRegister vd, VRegister vs2, VRegister vs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b011000, vm);
EmitR(funct7, vs2, vs1, enum_cast<uint32_t>(VAIEncoding::kOPIVV), vd, 0x57);
}
void Riscv64Assembler::VMseq_vx(VRegister vd, VRegister vs2, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b011000, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VAIEncoding::kOPIVX), vd, 0x57);
}
void Riscv64Assembler::VMseq_vi(VRegister vd, VRegister vs2, int32_t imm5, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b011000, vm);
EmitR(funct7, vs2, EncodeInt5(imm5), enum_cast<uint32_t>(VAIEncoding::kOPIVI), vd, 0x57);
}
void Riscv64Assembler::VMsne_vv(VRegister vd, VRegister vs2, VRegister vs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b011001, vm);
EmitR(funct7, vs2, vs1, enum_cast<uint32_t>(VAIEncoding::kOPIVV), vd, 0x57);
}
void Riscv64Assembler::VMsne_vx(VRegister vd, VRegister vs2, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b011001, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VAIEncoding::kOPIVX), vd, 0x57);
}
void Riscv64Assembler::VMsne_vi(VRegister vd, VRegister vs2, int32_t imm5, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b011001, vm);
EmitR(funct7, vs2, EncodeInt5(imm5), enum_cast<uint32_t>(VAIEncoding::kOPIVI), vd, 0x57);
}
void Riscv64Assembler::VMsltu_vv(VRegister vd, VRegister vs2, VRegister vs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b011010, vm);
EmitR(funct7, vs2, vs1, enum_cast<uint32_t>(VAIEncoding::kOPIVV), vd, 0x57);
}
void Riscv64Assembler::VMsltu_vx(VRegister vd, VRegister vs2, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b011010, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VAIEncoding::kOPIVX), vd, 0x57);
}
void Riscv64Assembler::VMsgtu_vv(VRegister vd, VRegister vs2, VRegister vs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
VMsltu_vv(vd, vs1, vs2, vm);
}
void Riscv64Assembler::VMslt_vv(VRegister vd, VRegister vs2, VRegister vs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b011011, vm);
EmitR(funct7, vs2, vs1, enum_cast<uint32_t>(VAIEncoding::kOPIVV), vd, 0x57);
}
void Riscv64Assembler::VMslt_vx(VRegister vd, VRegister vs2, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b011011, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VAIEncoding::kOPIVX), vd, 0x57);
@@ -3319,18 +3893,21 @@ void Riscv64Assembler::VMsgt_vv(VRegister vd, VRegister vs2, VRegister vs1, VM v
}
void Riscv64Assembler::VMsleu_vv(VRegister vd, VRegister vs2, VRegister vs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b011100, vm);
EmitR(funct7, vs2, vs1, enum_cast<uint32_t>(VAIEncoding::kOPIVV), vd, 0x57);
}
void Riscv64Assembler::VMsleu_vx(VRegister vd, VRegister vs2, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b011100, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VAIEncoding::kOPIVX), vd, 0x57);
}
void Riscv64Assembler::VMsleu_vi(VRegister vd, VRegister vs2, int32_t imm5, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b011100, vm);
EmitR(funct7, vs2, EncodeInt5(imm5), enum_cast<uint32_t>(VAIEncoding::kOPIVI), vd, 0x57);
@@ -3346,18 +3923,21 @@ void Riscv64Assembler::VMsltu_vi(VRegister vd, VRegister vs2, int32_t aimm5, VM
}
void Riscv64Assembler::VMsle_vv(VRegister vd, VRegister vs2, VRegister vs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b011101, vm);
EmitR(funct7, vs2, vs1, enum_cast<uint32_t>(VAIEncoding::kOPIVV), vd, 0x57);
}
void Riscv64Assembler::VMsle_vx(VRegister vd, VRegister vs2, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b011101, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VAIEncoding::kOPIVX), vd, 0x57);
}
void Riscv64Assembler::VMsle_vi(VRegister vd, VRegister vs2, int32_t imm5, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b011101, vm);
EmitR(funct7, vs2, EncodeInt5(imm5), enum_cast<uint32_t>(VAIEncoding::kOPIVI), vd, 0x57);
@@ -3372,29 +3952,34 @@ void Riscv64Assembler::VMslt_vi(VRegister vd, VRegister vs2, int32_t aimm5, VM v
}
void Riscv64Assembler::VMsgtu_vx(VRegister vd, VRegister vs2, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b011110, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VAIEncoding::kOPIVX), vd, 0x57);
}
void Riscv64Assembler::VMsgtu_vi(VRegister vd, VRegister vs2, int32_t imm5, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b011110, vm);
EmitR(funct7, vs2, EncodeInt5(imm5), enum_cast<uint32_t>(VAIEncoding::kOPIVI), vd, 0x57);
}
void Riscv64Assembler::VMsgeu_vi(VRegister vd, VRegister vs2, int32_t aimm5, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
CHECK(IsUint<4>(aimm5 - 1)) << "Should be between [1, 16]: " << aimm5;
VMsgtu_vi(vd, vs2, aimm5 - 1, vm);
}
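
VMsgeu_vi is a pseudo-instruction in the same spirit as VMsgtu_vv earlier (which swaps operands to reuse vmsltu): unsigned "greater or equal to immediate" has no native encoding, so it decrements the immediate and emits the strict comparison — exactly why the CHECK restricts aimm5 to [1, 16]. For example:

    __ VMsgeu_vi(V2, V4, 8, VM::kUnmasked);  // emitted as vmsgtu.vi v2, v4, 7
    // VMsgeu_vi(V2, V4, 0, ...) would CHECK-fail: unsigned x >= 0 is always
    // true, so VMset_m is the right tool for that case.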
void Riscv64Assembler::VMsgt_vx(VRegister vd, VRegister vs2, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b011111, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VAIEncoding::kOPIVX), vd, 0x57);
}
void Riscv64Assembler::VMsgt_vi(VRegister vd, VRegister vs2, int32_t imm5, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b011111, vm);
EmitR(funct7, vs2, EncodeInt5(imm5), enum_cast<uint32_t>(VAIEncoding::kOPIVI), vd, 0x57);
@@ -3405,102 +3990,119 @@ void Riscv64Assembler::VMsge_vi(VRegister vd, VRegister vs2, int32_t aimm5, VM v
}
void Riscv64Assembler::VSaddu_vv(VRegister vd, VRegister vs2, VRegister vs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b100000, vm);
EmitR(funct7, vs2, vs1, enum_cast<uint32_t>(VAIEncoding::kOPIVV), vd, 0x57);
}
void Riscv64Assembler::VSaddu_vx(VRegister vd, VRegister vs2, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b100000, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VAIEncoding::kOPIVX), vd, 0x57);
}
void Riscv64Assembler::VSaddu_vi(VRegister vd, VRegister vs2, int32_t imm5, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b100000, vm);
EmitR(funct7, vs2, EncodeInt5(imm5), enum_cast<uint32_t>(VAIEncoding::kOPIVI), vd, 0x57);
}
void Riscv64Assembler::VSadd_vv(VRegister vd, VRegister vs2, VRegister vs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b100001, vm);
EmitR(funct7, vs2, vs1, enum_cast<uint32_t>(VAIEncoding::kOPIVV), vd, 0x57);
}
void Riscv64Assembler::VSadd_vx(VRegister vd, VRegister vs2, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b100001, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VAIEncoding::kOPIVX), vd, 0x57);
}
void Riscv64Assembler::VSadd_vi(VRegister vd, VRegister vs2, int32_t imm5, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b100001, vm);
EmitR(funct7, vs2, EncodeInt5(imm5), enum_cast<uint32_t>(VAIEncoding::kOPIVI), vd, 0x57);
}
void Riscv64Assembler::VSsubu_vv(VRegister vd, VRegister vs2, VRegister vs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b100010, vm);
EmitR(funct7, vs2, vs1, enum_cast<uint32_t>(VAIEncoding::kOPIVV), vd, 0x57);
}
void Riscv64Assembler::VSsubu_vx(VRegister vd, VRegister vs2, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b100010, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VAIEncoding::kOPIVX), vd, 0x57);
}
void Riscv64Assembler::VSsub_vv(VRegister vd, VRegister vs2, VRegister vs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b100011, vm);
EmitR(funct7, vs2, vs1, enum_cast<uint32_t>(VAIEncoding::kOPIVV), vd, 0x57);
}
void Riscv64Assembler::VSsub_vx(VRegister vd, VRegister vs2, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b100011, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VAIEncoding::kOPIVX), vd, 0x57);
}
void Riscv64Assembler::VSll_vv(VRegister vd, VRegister vs2, VRegister vs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b100101, vm);
EmitR(funct7, vs2, vs1, enum_cast<uint32_t>(VAIEncoding::kOPIVV), vd, 0x57);
}
void Riscv64Assembler::VSll_vx(VRegister vd, VRegister vs2, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b100101, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VAIEncoding::kOPIVX), vd, 0x57);
}
void Riscv64Assembler::VSll_vi(VRegister vd, VRegister vs2, uint32_t uimm5, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b100101, vm);
EmitR(funct7, vs2, uimm5, enum_cast<uint32_t>(VAIEncoding::kOPIVI), vd, 0x57);
}
void Riscv64Assembler::VSmul_vv(VRegister vd, VRegister vs2, VRegister vs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b100111, vm);
EmitR(funct7, vs2, vs1, enum_cast<uint32_t>(VAIEncoding::kOPIVV), vd, 0x57);
}
void Riscv64Assembler::VSmul_vx(VRegister vd, VRegister vs2, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b100111, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VAIEncoding::kOPIVX), vd, 0x57);
}
void Riscv64Assembler::Vmv1r_v(VRegister vd, VRegister vs2) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVF7(0b100111, VM::kUnmasked);
EmitR(
funct7, vs2, enum_cast<uint32_t>(Nf::k1), enum_cast<uint32_t>(VAIEncoding::kOPIVI), vd, 0x57);
}
void Riscv64Assembler::Vmv2r_v(VRegister vd, VRegister vs2) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_EQ(enum_cast<uint32_t>(vd) % 2, 0u);
DCHECK_EQ(enum_cast<uint32_t>(vs2) % 2, 0u);
const uint32_t funct7 = EncodeRVVF7(0b100111, VM::kUnmasked);
@@ -3509,6 +4111,7 @@ void Riscv64Assembler::Vmv2r_v(VRegister vd, VRegister vs2) {
}
void Riscv64Assembler::Vmv4r_v(VRegister vd, VRegister vs2) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_EQ(enum_cast<uint32_t>(vd) % 4, 0u);
DCHECK_EQ(enum_cast<uint32_t>(vs2) % 4, 0u);
const uint32_t funct7 = EncodeRVVF7(0b100111, VM::kUnmasked);
@@ -3517,6 +4120,7 @@ void Riscv64Assembler::Vmv4r_v(VRegister vd, VRegister vs2) {
}
void Riscv64Assembler::Vmv8r_v(VRegister vd, VRegister vs2) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_EQ(enum_cast<uint32_t>(vd) % 8, 0u);
DCHECK_EQ(enum_cast<uint32_t>(vs2) % 8, 0u);
const uint32_t funct7 = EncodeRVVF7(0b100111, VM::kUnmasked);
@@ -3525,252 +4129,296 @@ void Riscv64Assembler::Vmv8r_v(VRegister vd, VRegister vs2) {
}
void Riscv64Assembler::VSrl_vv(VRegister vd, VRegister vs2, VRegister vs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b101000, vm);
EmitR(funct7, vs2, vs1, enum_cast<uint32_t>(VAIEncoding::kOPIVV), vd, 0x57);
}
void Riscv64Assembler::VSrl_vx(VRegister vd, VRegister vs2, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b101000, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VAIEncoding::kOPIVX), vd, 0x57);
}
void Riscv64Assembler::VSrl_vi(VRegister vd, VRegister vs2, uint32_t uimm5, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b101000, vm);
EmitR(funct7, vs2, uimm5, enum_cast<uint32_t>(VAIEncoding::kOPIVI), vd, 0x57);
}
void Riscv64Assembler::VSra_vv(VRegister vd, VRegister vs2, VRegister vs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b101001, vm);
EmitR(funct7, vs2, vs1, enum_cast<uint32_t>(VAIEncoding::kOPIVV), vd, 0x57);
}
void Riscv64Assembler::VSra_vx(VRegister vd, VRegister vs2, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b101001, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VAIEncoding::kOPIVX), vd, 0x57);
}
void Riscv64Assembler::VSra_vi(VRegister vd, VRegister vs2, uint32_t uimm5, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b101001, vm);
EmitR(funct7, vs2, uimm5, enum_cast<uint32_t>(VAIEncoding::kOPIVI), vd, 0x57);
}
void Riscv64Assembler::VSsrl_vv(VRegister vd, VRegister vs2, VRegister vs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b101010, vm);
EmitR(funct7, vs2, vs1, enum_cast<uint32_t>(VAIEncoding::kOPIVV), vd, 0x57);
}
void Riscv64Assembler::VSsrl_vx(VRegister vd, VRegister vs2, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b101010, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VAIEncoding::kOPIVX), vd, 0x57);
}
void Riscv64Assembler::VSsrl_vi(VRegister vd, VRegister vs2, uint32_t uimm5, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b101010, vm);
EmitR(funct7, vs2, uimm5, enum_cast<uint32_t>(VAIEncoding::kOPIVI), vd, 0x57);
}
void Riscv64Assembler::VSsra_vv(VRegister vd, VRegister vs2, VRegister vs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b101011, vm);
EmitR(funct7, vs2, vs1, enum_cast<uint32_t>(VAIEncoding::kOPIVV), vd, 0x57);
}
void Riscv64Assembler::VSsra_vx(VRegister vd, VRegister vs2, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b101011, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VAIEncoding::kOPIVX), vd, 0x57);
}
void Riscv64Assembler::VSsra_vi(VRegister vd, VRegister vs2, uint32_t uimm5, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b101011, vm);
EmitR(funct7, vs2, uimm5, enum_cast<uint32_t>(VAIEncoding::kOPIVI), vd, 0x57);
}
void Riscv64Assembler::VNsrl_wv(VRegister vd, VRegister vs2, VRegister vs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b101100, vm);
EmitR(funct7, vs2, vs1, enum_cast<uint32_t>(VAIEncoding::kOPIVV), vd, 0x57);
}
void Riscv64Assembler::VNsrl_wx(VRegister vd, VRegister vs2, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b101100, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VAIEncoding::kOPIVX), vd, 0x57);
}
void Riscv64Assembler::VNsrl_wi(VRegister vd, VRegister vs2, uint32_t uimm5, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b101100, vm);
EmitR(funct7, vs2, uimm5, enum_cast<uint32_t>(VAIEncoding::kOPIVI), vd, 0x57);
}
void Riscv64Assembler::VNcvt_x_x_w(VRegister vd, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
VNsrl_wx(vd, vs2, Zero, vm);
}
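
VNcvt_x_x_w is the spec's recommended idiom rather than a separate opcode: narrowing an integer from 2*SEW to SEW is a vnsrl by the zero register. The two calls below are encoding-identical:

    __ VNcvt_x_x_w(V2, V4, VM::kUnmasked);
    __ VNsrl_wx(V2, V4, Zero, VM::kUnmasked);  // vnsrl.wx v2, v4, x0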
void Riscv64Assembler::VNsra_wv(VRegister vd, VRegister vs2, VRegister vs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b101101, vm);
EmitR(funct7, vs2, vs1, enum_cast<uint32_t>(VAIEncoding::kOPIVV), vd, 0x57);
}
void Riscv64Assembler::VNsra_wx(VRegister vd, VRegister vs2, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b101101, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VAIEncoding::kOPIVX), vd, 0x57);
}
void Riscv64Assembler::VNsra_wi(VRegister vd, VRegister vs2, uint32_t uimm5, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b101101, vm);
EmitR(funct7, vs2, uimm5, enum_cast<uint32_t>(VAIEncoding::kOPIVI), vd, 0x57);
}
void Riscv64Assembler::VNclipu_wv(VRegister vd, VRegister vs2, VRegister vs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b101110, vm);
EmitR(funct7, vs2, vs1, enum_cast<uint32_t>(VAIEncoding::kOPIVV), vd, 0x57);
}
void Riscv64Assembler::VNclipu_wx(VRegister vd, VRegister vs2, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b101110, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VAIEncoding::kOPIVX), vd, 0x57);
}
void Riscv64Assembler::VNclipu_wi(VRegister vd, VRegister vs2, uint32_t uimm5, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b101110, vm);
EmitR(funct7, vs2, uimm5, enum_cast<uint32_t>(VAIEncoding::kOPIVI), vd, 0x57);
}
void Riscv64Assembler::VNclip_wv(VRegister vd, VRegister vs2, VRegister vs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b101111, vm);
EmitR(funct7, vs2, vs1, enum_cast<uint32_t>(VAIEncoding::kOPIVV), vd, 0x57);
}
void Riscv64Assembler::VNclip_wx(VRegister vd, VRegister vs2, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b101111, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VAIEncoding::kOPIVX), vd, 0x57);
}
void Riscv64Assembler::VNclip_wi(VRegister vd, VRegister vs2, uint32_t uimm5, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b101111, vm);
EmitR(funct7, vs2, uimm5, enum_cast<uint32_t>(VAIEncoding::kOPIVI), vd, 0x57);
}
void Riscv64Assembler::VWredsumu_vs(VRegister vd, VRegister vs2, VRegister vs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVF7(0b110000, vm);
EmitR(funct7, vs2, vs1, enum_cast<uint32_t>(VAIEncoding::kOPIVV), vd, 0x57);
}
void Riscv64Assembler::VWredsum_vs(VRegister vd, VRegister vs2, VRegister vs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVF7(0b110001, vm);
EmitR(funct7, vs2, vs1, enum_cast<uint32_t>(VAIEncoding::kOPIVV), vd, 0x57);
}
void Riscv64Assembler::VRedsum_vs(VRegister vd, VRegister vs2, VRegister vs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVF7(0b000000, vm);
EmitR(funct7, vs2, vs1, enum_cast<uint32_t>(VAIEncoding::kOPMVV), vd, 0x57);
}
void Riscv64Assembler::VRedand_vs(VRegister vd, VRegister vs2, VRegister vs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVF7(0b000001, vm);
EmitR(funct7, vs2, vs1, enum_cast<uint32_t>(VAIEncoding::kOPMVV), vd, 0x57);
}
void Riscv64Assembler::VRedor_vs(VRegister vd, VRegister vs2, VRegister vs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVF7(0b000010, vm);
EmitR(funct7, vs2, vs1, enum_cast<uint32_t>(VAIEncoding::kOPMVV), vd, 0x57);
}
void Riscv64Assembler::VRedxor_vs(VRegister vd, VRegister vs2, VRegister vs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVF7(0b000011, vm);
EmitR(funct7, vs2, vs1, enum_cast<uint32_t>(VAIEncoding::kOPMVV), vd, 0x57);
}
void Riscv64Assembler::VRedminu_vs(VRegister vd, VRegister vs2, VRegister vs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVF7(0b000100, vm);
EmitR(funct7, vs2, vs1, enum_cast<uint32_t>(VAIEncoding::kOPMVV), vd, 0x57);
}
void Riscv64Assembler::VRedmin_vs(VRegister vd, VRegister vs2, VRegister vs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVF7(0b000101, vm);
EmitR(funct7, vs2, vs1, enum_cast<uint32_t>(VAIEncoding::kOPMVV), vd, 0x57);
}
void Riscv64Assembler::VRedmaxu_vs(VRegister vd, VRegister vs2, VRegister vs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVF7(0b000110, vm);
EmitR(funct7, vs2, vs1, enum_cast<uint32_t>(VAIEncoding::kOPMVV), vd, 0x57);
}
void Riscv64Assembler::VRedmax_vs(VRegister vd, VRegister vs2, VRegister vs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVF7(0b000111, vm);
EmitR(funct7, vs2, vs1, enum_cast<uint32_t>(VAIEncoding::kOPMVV), vd, 0x57);
}
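
The *_vs reductions above fold every active element of vs2 together with element 0 of vs1 and write the scalar result to element 0 of vd, so callers seed the accumulator first. Sketch (register choices illustrative):

    __ VMv_vi(V1, 0);                          // seed: v1[0] = 0
    __ VRedsum_vs(V2, V4, V1, VM::kUnmasked);  // v2[0] = v1[0] + sum(v4[*])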
void Riscv64Assembler::VAaddu_vv(VRegister vd, VRegister vs2, VRegister vs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b001000, vm);
EmitR(funct7, vs2, vs1, enum_cast<uint32_t>(VAIEncoding::kOPMVV), vd, 0x57);
}
void Riscv64Assembler::VAaddu_vx(VRegister vd, VRegister vs2, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b001000, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VAIEncoding::kOPMVX), vd, 0x57);
}
void Riscv64Assembler::VAadd_vv(VRegister vd, VRegister vs2, VRegister vs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b001001, vm);
EmitR(funct7, vs2, vs1, enum_cast<uint32_t>(VAIEncoding::kOPMVV), vd, 0x57);
}
void Riscv64Assembler::VAadd_vx(VRegister vd, VRegister vs2, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b001001, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VAIEncoding::kOPMVX), vd, 0x57);
}
void Riscv64Assembler::VAsubu_vv(VRegister vd, VRegister vs2, VRegister vs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b001010, vm);
EmitR(funct7, vs2, vs1, enum_cast<uint32_t>(VAIEncoding::kOPMVV), vd, 0x57);
}
void Riscv64Assembler::VAsubu_vx(VRegister vd, VRegister vs2, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b001010, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VAIEncoding::kOPMVX), vd, 0x57);
}
void Riscv64Assembler::VAsub_vv(VRegister vd, VRegister vs2, VRegister vs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b001011, vm);
EmitR(funct7, vs2, vs1, enum_cast<uint32_t>(VAIEncoding::kOPMVV), vd, 0x57);
}
void Riscv64Assembler::VAsub_vx(VRegister vd, VRegister vs2, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b001011, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VAIEncoding::kOPMVX), vd, 0x57);
}
void Riscv64Assembler::VSlide1up_vx(VRegister vd, VRegister vs2, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
DCHECK(vd != vs2);
const uint32_t funct7 = EncodeRVVF7(0b001110, vm);
@@ -3778,12 +4426,14 @@ void Riscv64Assembler::VSlide1up_vx(VRegister vd, VRegister vs2, XRegister rs1,
}
void Riscv64Assembler::VSlide1down_vx(VRegister vd, VRegister vs2, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b001111, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VAIEncoding::kOPMVX), vd, 0x57);
}
void Riscv64Assembler::VCompress_vm(VRegister vd, VRegister vs2, VRegister vs1) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK(vd != vs1);
DCHECK(vd != vs2);
const uint32_t funct7 = EncodeRVVF7(0b010111, VM::kUnmasked);
@@ -3791,11 +4441,13 @@ void Riscv64Assembler::VCompress_vm(VRegister vd, VRegister vs2, VRegister vs1)
}
void Riscv64Assembler::VMandn_mm(VRegister vd, VRegister vs2, VRegister vs1) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVF7(0b011000, VM::kUnmasked);
EmitR(funct7, vs2, vs1, enum_cast<uint32_t>(VAIEncoding::kOPMVV), vd, 0x57);
}
void Riscv64Assembler::VMand_mm(VRegister vd, VRegister vs2, VRegister vs1) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVF7(0b011001, VM::kUnmasked);
EmitR(funct7, vs2, vs1, enum_cast<uint32_t>(VAIEncoding::kOPMVV), vd, 0x57);
}
@@ -3803,11 +4455,13 @@ void Riscv64Assembler::VMand_mm(VRegister vd, VRegister vs2, VRegister vs1) {
void Riscv64Assembler::VMmv_m(VRegister vd, VRegister vs2) { VMand_mm(vd, vs2, vs2); }
void Riscv64Assembler::VMor_mm(VRegister vd, VRegister vs2, VRegister vs1) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVF7(0b011010, VM::kUnmasked);
EmitR(funct7, vs2, vs1, enum_cast<uint32_t>(VAIEncoding::kOPMVV), vd, 0x57);
}
void Riscv64Assembler::VMxor_mm(VRegister vd, VRegister vs2, VRegister vs1) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVF7(0b011011, VM::kUnmasked);
EmitR(funct7, vs2, vs1, enum_cast<uint32_t>(VAIEncoding::kOPMVV), vd, 0x57);
}
@@ -3815,11 +4469,13 @@ void Riscv64Assembler::VMxor_mm(VRegister vd, VRegister vs2, VRegister vs1) {
void Riscv64Assembler::VMclr_m(VRegister vd) { VMxor_mm(vd, vd, vd); }
void Riscv64Assembler::VMorn_mm(VRegister vd, VRegister vs2, VRegister vs1) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVF7(0b011100, VM::kUnmasked);
EmitR(funct7, vs2, vs1, enum_cast<uint32_t>(VAIEncoding::kOPMVV), vd, 0x57);
}
void Riscv64Assembler::VMnand_mm(VRegister vd, VRegister vs2, VRegister vs1) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVF7(0b011101, VM::kUnmasked);
EmitR(funct7, vs2, vs1, enum_cast<uint32_t>(VAIEncoding::kOPMVV), vd, 0x57);
}
@@ -3827,11 +4483,13 @@ void Riscv64Assembler::VMnand_mm(VRegister vd, VRegister vs2, VRegister vs1) {
void Riscv64Assembler::VMnot_m(VRegister vd, VRegister vs2) { VMnand_mm(vd, vs2, vs2); }
void Riscv64Assembler::VMnor_mm(VRegister vd, VRegister vs2, VRegister vs1) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVF7(0b011110, VM::kUnmasked);
EmitR(funct7, vs2, vs1, enum_cast<uint32_t>(VAIEncoding::kOPMVV), vd, 0x57);
}
void Riscv64Assembler::VMxnor_mm(VRegister vd, VRegister vs2, VRegister vs1) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVF7(0b011111, VM::kUnmasked);
EmitR(funct7, vs2, vs1, enum_cast<uint32_t>(VAIEncoding::kOPMVV), vd, 0x57);
}
@@ -3839,138 +4497,161 @@ void Riscv64Assembler::VMxnor_mm(VRegister vd, VRegister vs2, VRegister vs1) {
void Riscv64Assembler::VMset_m(VRegister vd) { VMxnor_mm(vd, vd, vd); }
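
The mask pseudo-ops interleaved above all exploit self-operand identities: and(x, x) = x (VMmv_m), xor(x, x) = 0 (VMclr_m), nand(x, x) = ~x (VMnot_m), and xnor(x, x) = all ones (VMset_m). Usage sketch:

    __ VMset_m(V0);       // v0 = all ones  (vmxnor.mm v0, v0, v0)
    __ VMclr_m(V1);       // v1 = all zeros (vmxor.mm  v1, v1, v1)
    __ VMnot_m(V2, V0);   // v2 = ~v0       (vmnand.mm v2, v0, v0)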
void Riscv64Assembler::VDivu_vv(VRegister vd, VRegister vs2, VRegister vs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b100000, vm);
EmitR(funct7, vs2, vs1, enum_cast<uint32_t>(VAIEncoding::kOPMVV), vd, 0x57);
}
void Riscv64Assembler::VDivu_vx(VRegister vd, VRegister vs2, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b100000, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VAIEncoding::kOPMVX), vd, 0x57);
}
void Riscv64Assembler::VDiv_vv(VRegister vd, VRegister vs2, VRegister vs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b100001, vm);
EmitR(funct7, vs2, vs1, enum_cast<uint32_t>(VAIEncoding::kOPMVV), vd, 0x57);
}
void Riscv64Assembler::VDiv_vx(VRegister vd, VRegister vs2, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b100001, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VAIEncoding::kOPMVX), vd, 0x57);
}
void Riscv64Assembler::VRemu_vv(VRegister vd, VRegister vs2, VRegister vs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b100010, vm);
EmitR(funct7, vs2, vs1, enum_cast<uint32_t>(VAIEncoding::kOPMVV), vd, 0x57);
}
void Riscv64Assembler::VRemu_vx(VRegister vd, VRegister vs2, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b100010, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VAIEncoding::kOPMVX), vd, 0x57);
}
void Riscv64Assembler::VRem_vv(VRegister vd, VRegister vs2, VRegister vs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b100011, vm);
EmitR(funct7, vs2, vs1, enum_cast<uint32_t>(VAIEncoding::kOPMVV), vd, 0x57);
}
void Riscv64Assembler::VRem_vx(VRegister vd, VRegister vs2, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b100011, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VAIEncoding::kOPMVX), vd, 0x57);
}
void Riscv64Assembler::VMulhu_vv(VRegister vd, VRegister vs2, VRegister vs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b100100, vm);
EmitR(funct7, vs2, vs1, enum_cast<uint32_t>(VAIEncoding::kOPMVV), vd, 0x57);
}
void Riscv64Assembler::VMulhu_vx(VRegister vd, VRegister vs2, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b100100, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VAIEncoding::kOPMVX), vd, 0x57);
}
void Riscv64Assembler::VMul_vv(VRegister vd, VRegister vs2, VRegister vs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b100101, vm);
EmitR(funct7, vs2, vs1, enum_cast<uint32_t>(VAIEncoding::kOPMVV), vd, 0x57);
}
void Riscv64Assembler::VMul_vx(VRegister vd, VRegister vs2, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b100101, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VAIEncoding::kOPMVX), vd, 0x57);
}
void Riscv64Assembler::VMulhsu_vv(VRegister vd, VRegister vs2, VRegister vs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b100110, vm);
EmitR(funct7, vs2, vs1, enum_cast<uint32_t>(VAIEncoding::kOPMVV), vd, 0x57);
}
void Riscv64Assembler::VMulhsu_vx(VRegister vd, VRegister vs2, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b100110, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VAIEncoding::kOPMVX), vd, 0x57);
}
void Riscv64Assembler::VMulh_vv(VRegister vd, VRegister vs2, VRegister vs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b100111, vm);
EmitR(funct7, vs2, vs1, enum_cast<uint32_t>(VAIEncoding::kOPMVV), vd, 0x57);
}
void Riscv64Assembler::VMulh_vx(VRegister vd, VRegister vs2, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b100111, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VAIEncoding::kOPMVX), vd, 0x57);
}
void Riscv64Assembler::VMadd_vv(VRegister vd, VRegister vs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b101001, vm);
EmitR(funct7, vs2, vs1, enum_cast<uint32_t>(VAIEncoding::kOPMVV), vd, 0x57);
}
void Riscv64Assembler::VMadd_vx(VRegister vd, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b101001, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VAIEncoding::kOPMVX), vd, 0x57);
}
void Riscv64Assembler::VNmsub_vv(VRegister vd, VRegister vs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b101011, vm);
EmitR(funct7, vs2, vs1, enum_cast<uint32_t>(VAIEncoding::kOPMVV), vd, 0x57);
}
void Riscv64Assembler::VNmsub_vx(VRegister vd, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b101011, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VAIEncoding::kOPMVX), vd, 0x57);
}
void Riscv64Assembler::VMacc_vv(VRegister vd, VRegister vs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b101101, vm);
EmitR(funct7, vs2, vs1, enum_cast<uint32_t>(VAIEncoding::kOPMVV), vd, 0x57);
}
void Riscv64Assembler::VMacc_vx(VRegister vd, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b101101, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VAIEncoding::kOPMVX), vd, 0x57);
}
void Riscv64Assembler::VNmsac_vv(VRegister vd, VRegister vs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
DCHECK(vd != vs1);
DCHECK(vd != vs2);
@@ -3979,12 +4660,14 @@ void Riscv64Assembler::VNmsac_vv(VRegister vd, VRegister vs1, VRegister vs2, VM
}
void Riscv64Assembler::VNmsac_vx(VRegister vd, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b101111, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VAIEncoding::kOPMVX), vd, 0x57);
}
void Riscv64Assembler::VWaddu_vv(VRegister vd, VRegister vs2, VRegister vs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
DCHECK(vd != vs1);
DCHECK(vd != vs2);
@@ -3993,6 +4676,7 @@ void Riscv64Assembler::VWaddu_vv(VRegister vd, VRegister vs2, VRegister vs1, VM
}
void Riscv64Assembler::VWaddu_vx(VRegister vd, VRegister vs2, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
DCHECK(vd != vs2);
const uint32_t funct7 = EncodeRVVF7(0b110000, vm);
@@ -4004,6 +4688,7 @@ void Riscv64Assembler::VWcvtu_x_x_v(VRegister vd, VRegister vs, VM vm) {
}
void Riscv64Assembler::VWadd_vv(VRegister vd, VRegister vs2, VRegister vs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
DCHECK(vd != vs1);
DCHECK(vd != vs2);
@@ -4012,6 +4697,7 @@ void Riscv64Assembler::VWadd_vv(VRegister vd, VRegister vs2, VRegister vs1, VM v
}
void Riscv64Assembler::VWadd_vx(VRegister vd, VRegister vs2, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
DCHECK(vd != vs2);
const uint32_t funct7 = EncodeRVVF7(0b110001, vm);
@@ -4023,6 +4709,7 @@ void Riscv64Assembler::VWcvt_x_x_v(VRegister vd, VRegister vs, VM vm) {
}
void Riscv64Assembler::VWsubu_vv(VRegister vd, VRegister vs2, VRegister vs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
DCHECK(vd != vs1);
DCHECK(vd != vs2);
@@ -4031,6 +4718,7 @@ void Riscv64Assembler::VWsubu_vv(VRegister vd, VRegister vs2, VRegister vs1, VM
}
void Riscv64Assembler::VWsubu_vx(VRegister vd, VRegister vs2, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
DCHECK(vd != vs2);
const uint32_t funct7 = EncodeRVVF7(0b110010, vm);
@@ -4038,6 +4726,7 @@ void Riscv64Assembler::VWsubu_vx(VRegister vd, VRegister vs2, XRegister rs1, VM
}
void Riscv64Assembler::VWsub_vv(VRegister vd, VRegister vs2, VRegister vs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
DCHECK(vd != vs1);
DCHECK(vd != vs2);
@@ -4046,6 +4735,7 @@ void Riscv64Assembler::VWsub_vv(VRegister vd, VRegister vs2, VRegister vs1, VM v
}
void Riscv64Assembler::VWsub_vx(VRegister vd, VRegister vs2, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
DCHECK(vd != vs2);
const uint32_t funct7 = EncodeRVVF7(0b110011, vm);
@@ -4053,6 +4743,7 @@ void Riscv64Assembler::VWsub_vx(VRegister vd, VRegister vs2, XRegister rs1, VM v
}
void Riscv64Assembler::VWaddu_wv(VRegister vd, VRegister vs2, VRegister vs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
DCHECK(vd != vs1);
const uint32_t funct7 = EncodeRVVF7(0b110100, vm);
@@ -4060,12 +4751,14 @@ void Riscv64Assembler::VWaddu_wv(VRegister vd, VRegister vs2, VRegister vs1, VM
}
void Riscv64Assembler::VWaddu_wx(VRegister vd, VRegister vs2, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b110100, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VAIEncoding::kOPMVX), vd, 0x57);
}
void Riscv64Assembler::VWadd_wv(VRegister vd, VRegister vs2, VRegister vs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
DCHECK(vd != vs1);
const uint32_t funct7 = EncodeRVVF7(0b110101, vm);
@@ -4073,12 +4766,14 @@ void Riscv64Assembler::VWadd_wv(VRegister vd, VRegister vs2, VRegister vs1, VM v
}
void Riscv64Assembler::VWadd_wx(VRegister vd, VRegister vs2, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b110101, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VAIEncoding::kOPMVX), vd, 0x57);
}
void Riscv64Assembler::VWsubu_wv(VRegister vd, VRegister vs2, VRegister vs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
DCHECK(vd != vs1);
const uint32_t funct7 = EncodeRVVF7(0b110110, vm);
@@ -4086,12 +4781,14 @@ void Riscv64Assembler::VWsubu_wv(VRegister vd, VRegister vs2, VRegister vs1, VM
}
void Riscv64Assembler::VWsubu_wx(VRegister vd, VRegister vs2, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b110110, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VAIEncoding::kOPMVX), vd, 0x57);
}
void Riscv64Assembler::VWsub_wv(VRegister vd, VRegister vs2, VRegister vs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
DCHECK(vd != vs1);
const uint32_t funct7 = EncodeRVVF7(0b110111, vm);
@@ -4099,12 +4796,14 @@ void Riscv64Assembler::VWsub_wv(VRegister vd, VRegister vs2, VRegister vs1, VM v
}
void Riscv64Assembler::VWsub_wx(VRegister vd, VRegister vs2, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b110111, vm);
EmitR(funct7, vs2, rs1, enum_cast<uint32_t>(VAIEncoding::kOPMVX), vd, 0x57);
}
void Riscv64Assembler::VWmulu_vv(VRegister vd, VRegister vs2, VRegister vs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
DCHECK(vd != vs1);
DCHECK(vd != vs2);
@@ -4113,6 +4812,7 @@ void Riscv64Assembler::VWmulu_vv(VRegister vd, VRegister vs2, VRegister vs1, VM
}
void Riscv64Assembler::VWmulu_vx(VRegister vd, VRegister vs2, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
DCHECK(vd != vs2);
const uint32_t funct7 = EncodeRVVF7(0b111000, vm);
@@ -4120,6 +4820,7 @@ void Riscv64Assembler::VWmulu_vx(VRegister vd, VRegister vs2, XRegister rs1, VM
}
void Riscv64Assembler::VWmulsu_vv(VRegister vd, VRegister vs2, VRegister vs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
DCHECK(vd != vs1);
DCHECK(vd != vs2);
@@ -4128,6 +4829,7 @@ void Riscv64Assembler::VWmulsu_vv(VRegister vd, VRegister vs2, VRegister vs1, VM
}
void Riscv64Assembler::VWmulsu_vx(VRegister vd, VRegister vs2, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
DCHECK(vd != vs2);
const uint32_t funct7 = EncodeRVVF7(0b111010, vm);
@@ -4135,6 +4837,7 @@ void Riscv64Assembler::VWmulsu_vx(VRegister vd, VRegister vs2, XRegister rs1, VM
}
void Riscv64Assembler::VWmul_vv(VRegister vd, VRegister vs2, VRegister vs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
DCHECK(vd != vs1);
DCHECK(vd != vs2);
@@ -4143,6 +4846,7 @@ void Riscv64Assembler::VWmul_vv(VRegister vd, VRegister vs2, VRegister vs1, VM v
}
void Riscv64Assembler::VWmul_vx(VRegister vd, VRegister vs2, XRegister rs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
DCHECK(vd != vs2);
const uint32_t funct7 = EncodeRVVF7(0b111011, vm);
@@ -4150,6 +4854,7 @@ void Riscv64Assembler::VWmul_vx(VRegister vd, VRegister vs2, XRegister rs1, VM v
}
void Riscv64Assembler::VWmaccu_vv(VRegister vd, VRegister vs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
DCHECK(vd != vs1);
DCHECK(vd != vs2);
@@ -4158,6 +4863,7 @@ void Riscv64Assembler::VWmaccu_vv(VRegister vd, VRegister vs1, VRegister vs2, VM
}
void Riscv64Assembler::VWmaccu_vx(VRegister vd, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
DCHECK(vd != vs2);
const uint32_t funct7 = EncodeRVVF7(0b111100, vm);
@@ -4165,6 +4871,7 @@ void Riscv64Assembler::VWmaccu_vx(VRegister vd, XRegister rs1, VRegister vs2, VM
}
void Riscv64Assembler::VWmacc_vv(VRegister vd, VRegister vs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
DCHECK(vd != vs1);
DCHECK(vd != vs2);
@@ -4173,6 +4880,7 @@ void Riscv64Assembler::VWmacc_vv(VRegister vd, VRegister vs1, VRegister vs2, VM
}
void Riscv64Assembler::VWmacc_vx(VRegister vd, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
DCHECK(vd != vs2);
const uint32_t funct7 = EncodeRVVF7(0b111101, vm);
@@ -4180,6 +4888,7 @@ void Riscv64Assembler::VWmacc_vx(VRegister vd, XRegister rs1, VRegister vs2, VM
}
void Riscv64Assembler::VWmaccus_vx(VRegister vd, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
DCHECK(vd != vs2);
const uint32_t funct7 = EncodeRVVF7(0b111110, vm);
@@ -4187,6 +4896,7 @@ void Riscv64Assembler::VWmaccus_vx(VRegister vd, XRegister rs1, VRegister vs2, V
}
void Riscv64Assembler::VWmaccsu_vv(VRegister vd, VRegister vs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
DCHECK(vd != vs1);
DCHECK(vd != vs2);
@@ -4195,6 +4905,7 @@ void Riscv64Assembler::VWmaccsu_vv(VRegister vd, VRegister vs1, VRegister vs2, V
}
void Riscv64Assembler::VWmaccsu_vx(VRegister vd, XRegister rs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
DCHECK(vd != vs2);
const uint32_t funct7 = EncodeRVVF7(0b111111, vm);
@@ -4202,92 +4913,108 @@ void Riscv64Assembler::VWmaccsu_vx(VRegister vd, XRegister rs1, VRegister vs2, V
}
void Riscv64Assembler::VFadd_vv(VRegister vd, VRegister vs2, VRegister vs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b000000, vm);
EmitR(funct7, vs2, vs1, enum_cast<uint32_t>(VAIEncoding::kOPFVV), vd, 0x57);
}
void Riscv64Assembler::VFadd_vf(VRegister vd, VRegister vs2, FRegister fs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b000000, vm);
EmitR(funct7, vs2, fs1, enum_cast<uint32_t>(VAIEncoding::kOPFVF), vd, 0x57);
}
void Riscv64Assembler::VFredusum_vs(VRegister vd, VRegister vs2, VRegister vs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVF7(0b000001, vm);
EmitR(funct7, vs2, vs1, enum_cast<uint32_t>(VAIEncoding::kOPFVV), vd, 0x57);
}
void Riscv64Assembler::VFsub_vv(VRegister vd, VRegister vs2, VRegister vs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b000010, vm);
EmitR(funct7, vs2, vs1, enum_cast<uint32_t>(VAIEncoding::kOPFVV), vd, 0x57);
}
void Riscv64Assembler::VFsub_vf(VRegister vd, VRegister vs2, FRegister fs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b000010, vm);
EmitR(funct7, vs2, fs1, enum_cast<uint32_t>(VAIEncoding::kOPFVF), vd, 0x57);
}
void Riscv64Assembler::VFredosum_vs(VRegister vd, VRegister vs2, VRegister vs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVF7(0b000011, vm);
EmitR(funct7, vs2, vs1, enum_cast<uint32_t>(VAIEncoding::kOPFVV), vd, 0x57);
}
void Riscv64Assembler::VFmin_vv(VRegister vd, VRegister vs2, VRegister vs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b000100, vm);
EmitR(funct7, vs2, vs1, enum_cast<uint32_t>(VAIEncoding::kOPFVV), vd, 0x57);
}
void Riscv64Assembler::VFmin_vf(VRegister vd, VRegister vs2, FRegister fs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b000100, vm);
EmitR(funct7, vs2, fs1, enum_cast<uint32_t>(VAIEncoding::kOPFVF), vd, 0x57);
}
void Riscv64Assembler::VFredmin_vs(VRegister vd, VRegister vs2, VRegister vs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVF7(0b000101, vm);
EmitR(funct7, vs2, vs1, enum_cast<uint32_t>(VAIEncoding::kOPFVV), vd, 0x57);
}
void Riscv64Assembler::VFmax_vv(VRegister vd, VRegister vs2, VRegister vs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b000110, vm);
EmitR(funct7, vs2, vs1, enum_cast<uint32_t>(VAIEncoding::kOPFVV), vd, 0x57);
}
void Riscv64Assembler::VFmax_vf(VRegister vd, VRegister vs2, FRegister fs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b000110, vm);
EmitR(funct7, vs2, fs1, enum_cast<uint32_t>(VAIEncoding::kOPFVF), vd, 0x57);
}
void Riscv64Assembler::VFredmax_vs(VRegister vd, VRegister vs2, VRegister vs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVF7(0b000111, vm);
EmitR(funct7, vs2, vs1, enum_cast<uint32_t>(VAIEncoding::kOPFVV), vd, 0x57);
}
void Riscv64Assembler::VFsgnj_vv(VRegister vd, VRegister vs2, VRegister vs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b001000, vm);
EmitR(funct7, vs2, vs1, enum_cast<uint32_t>(VAIEncoding::kOPFVV), vd, 0x57);
}
void Riscv64Assembler::VFsgnj_vf(VRegister vd, VRegister vs2, FRegister fs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b001000, vm);
EmitR(funct7, vs2, fs1, enum_cast<uint32_t>(VAIEncoding::kOPFVF), vd, 0x57);
}
void Riscv64Assembler::VFsgnjn_vv(VRegister vd, VRegister vs2, VRegister vs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b001001, vm);
EmitR(funct7, vs2, vs1, enum_cast<uint32_t>(VAIEncoding::kOPFVV), vd, 0x57);
}
void Riscv64Assembler::VFsgnjn_vf(VRegister vd, VRegister vs2, FRegister fs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b001001, vm);
EmitR(funct7, vs2, fs1, enum_cast<uint32_t>(VAIEncoding::kOPFVF), vd, 0x57);
@@ -4296,12 +5023,14 @@ void Riscv64Assembler::VFsgnjn_vf(VRegister vd, VRegister vs2, FRegister fs1, VM
void Riscv64Assembler::VFneg_v(VRegister vd, VRegister vs) { VFsgnjn_vv(vd, vs, vs); }
void Riscv64Assembler::VFsgnjx_vv(VRegister vd, VRegister vs2, VRegister vs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b001010, vm);
EmitR(funct7, vs2, vs1, enum_cast<uint32_t>(VAIEncoding::kOPFVV), vd, 0x57);
}
void Riscv64Assembler::VFsgnjx_vf(VRegister vd, VRegister vs2, FRegister fs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b001010, vm);
EmitR(funct7, vs2, fs1, enum_cast<uint32_t>(VAIEncoding::kOPFVF), vd, 0x57);
@@ -4310,6 +5039,7 @@ void Riscv64Assembler::VFsgnjx_vf(VRegister vd, VRegister vs2, FRegister fs1, VM
void Riscv64Assembler::VFabs_v(VRegister vd, VRegister vs) { VFsgnjx_vv(vd, vs, vs); }
void Riscv64Assembler::VFslide1up_vf(VRegister vd, VRegister vs2, FRegister fs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
DCHECK(vd != vs2);
const uint32_t funct7 = EncodeRVVF7(0b001110, vm);
@@ -4317,41 +5047,48 @@ void Riscv64Assembler::VFslide1up_vf(VRegister vd, VRegister vs2, FRegister fs1,
}
void Riscv64Assembler::VFslide1down_vf(VRegister vd, VRegister vs2, FRegister fs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b001111, vm);
EmitR(funct7, vs2, fs1, enum_cast<uint32_t>(VAIEncoding::kOPFVF), vd, 0x57);
}
void Riscv64Assembler::VFmerge_vfm(VRegister vd, VRegister vs2, FRegister fs1) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK(vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b010111, VM::kV0_t);
EmitR(funct7, vs2, fs1, enum_cast<uint32_t>(VAIEncoding::kOPFVF), vd, 0x57);
}
void Riscv64Assembler::VFmv_v_f(VRegister vd, FRegister fs1) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVF7(0b010111, VM::kUnmasked);
EmitR(funct7, V0, fs1, enum_cast<uint32_t>(VAIEncoding::kOPFVF), vd, 0x57);
}
void Riscv64Assembler::VMfeq_vv(VRegister vd, VRegister vs2, VRegister vs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b011000, vm);
EmitR(funct7, vs2, vs1, enum_cast<uint32_t>(VAIEncoding::kOPFVV), vd, 0x57);
}
void Riscv64Assembler::VMfeq_vf(VRegister vd, VRegister vs2, FRegister fs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b011000, vm);
EmitR(funct7, vs2, fs1, enum_cast<uint32_t>(VAIEncoding::kOPFVF), vd, 0x57);
}
void Riscv64Assembler::VMfle_vv(VRegister vd, VRegister vs2, VRegister vs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b011001, vm);
EmitR(funct7, vs2, vs1, enum_cast<uint32_t>(VAIEncoding::kOPFVV), vd, 0x57);
}
void Riscv64Assembler::VMfle_vf(VRegister vd, VRegister vs2, FRegister fs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b011001, vm);
EmitR(funct7, vs2, fs1, enum_cast<uint32_t>(VAIEncoding::kOPFVF), vd, 0x57);
@@ -4362,12 +5099,14 @@ void Riscv64Assembler::VMfge_vv(VRegister vd, VRegister vs2, VRegister vs1, VM v
}
void Riscv64Assembler::VMflt_vv(VRegister vd, VRegister vs2, VRegister vs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b011011, vm);
EmitR(funct7, vs2, vs1, enum_cast<uint32_t>(VAIEncoding::kOPFVV), vd, 0x57);
}
void Riscv64Assembler::VMflt_vf(VRegister vd, VRegister vs2, FRegister fs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b011011, vm);
EmitR(funct7, vs2, fs1, enum_cast<uint32_t>(VAIEncoding::kOPFVF), vd, 0x57);
@@ -4378,161 +5117,188 @@ void Riscv64Assembler::VMfgt_vv(VRegister vd, VRegister vs2, VRegister vs1, VM v
}
void Riscv64Assembler::VMfne_vv(VRegister vd, VRegister vs2, VRegister vs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b011100, vm);
EmitR(funct7, vs2, vs1, enum_cast<uint32_t>(VAIEncoding::kOPFVV), vd, 0x57);
}
void Riscv64Assembler::VMfne_vf(VRegister vd, VRegister vs2, FRegister fs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b011100, vm);
EmitR(funct7, vs2, fs1, enum_cast<uint32_t>(VAIEncoding::kOPFVF), vd, 0x57);
}
void Riscv64Assembler::VMfgt_vf(VRegister vd, VRegister vs2, FRegister fs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b011101, vm);
EmitR(funct7, vs2, fs1, enum_cast<uint32_t>(VAIEncoding::kOPFVF), vd, 0x57);
}
void Riscv64Assembler::VMfge_vf(VRegister vd, VRegister vs2, FRegister fs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b011111, vm);
EmitR(funct7, vs2, fs1, enum_cast<uint32_t>(VAIEncoding::kOPFVF), vd, 0x57);
}
void Riscv64Assembler::VFdiv_vv(VRegister vd, VRegister vs2, VRegister vs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVF7(0b100000, vm);
EmitR(funct7, vs2, vs1, enum_cast<uint32_t>(VAIEncoding::kOPFVV), vd, 0x57);
}
void Riscv64Assembler::VFdiv_vf(VRegister vd, VRegister vs2, FRegister fs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b100000, vm);
EmitR(funct7, vs2, fs1, enum_cast<uint32_t>(VAIEncoding::kOPFVF), vd, 0x57);
}
void Riscv64Assembler::VFrdiv_vf(VRegister vd, VRegister vs2, FRegister fs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b100001, vm);
EmitR(funct7, vs2, fs1, enum_cast<uint32_t>(VAIEncoding::kOPFVF), vd, 0x57);
}
void Riscv64Assembler::VFmul_vv(VRegister vd, VRegister vs2, VRegister vs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b100100, vm);
EmitR(funct7, vs2, vs1, enum_cast<uint32_t>(VAIEncoding::kOPFVV), vd, 0x57);
}
void Riscv64Assembler::VFmul_vf(VRegister vd, VRegister vs2, FRegister fs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b100100, vm);
EmitR(funct7, vs2, fs1, enum_cast<uint32_t>(VAIEncoding::kOPFVF), vd, 0x57);
}
void Riscv64Assembler::VFrsub_vf(VRegister vd, VRegister vs2, FRegister fs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b100111, vm);
EmitR(funct7, vs2, fs1, enum_cast<uint32_t>(VAIEncoding::kOPFVF), vd, 0x57);
}
void Riscv64Assembler::VFmadd_vv(VRegister vd, VRegister vs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b101000, vm);
EmitR(funct7, vs2, vs1, enum_cast<uint32_t>(VAIEncoding::kOPFVV), vd, 0x57);
}
void Riscv64Assembler::VFmadd_vf(VRegister vd, FRegister fs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b101000, vm);
EmitR(funct7, vs2, fs1, enum_cast<uint32_t>(VAIEncoding::kOPFVF), vd, 0x57);
}
void Riscv64Assembler::VFnmadd_vv(VRegister vd, VRegister vs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b101001, vm);
EmitR(funct7, vs2, vs1, enum_cast<uint32_t>(VAIEncoding::kOPFVV), vd, 0x57);
}
void Riscv64Assembler::VFnmadd_vf(VRegister vd, FRegister fs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b101001, vm);
EmitR(funct7, vs2, fs1, enum_cast<uint32_t>(VAIEncoding::kOPFVF), vd, 0x57);
}
void Riscv64Assembler::VFmsub_vv(VRegister vd, VRegister vs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b101010, vm);
EmitR(funct7, vs2, vs1, enum_cast<uint32_t>(VAIEncoding::kOPFVV), vd, 0x57);
}
void Riscv64Assembler::VFmsub_vf(VRegister vd, FRegister fs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b101010, vm);
EmitR(funct7, vs2, fs1, enum_cast<uint32_t>(VAIEncoding::kOPFVF), vd, 0x57);
}
void Riscv64Assembler::VFnmsub_vv(VRegister vd, VRegister vs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b101011, vm);
EmitR(funct7, vs2, vs1, enum_cast<uint32_t>(VAIEncoding::kOPFVV), vd, 0x57);
}
void Riscv64Assembler::VFnmsub_vf(VRegister vd, FRegister fs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b101011, vm);
EmitR(funct7, vs2, fs1, enum_cast<uint32_t>(VAIEncoding::kOPFVF), vd, 0x57);
}
void Riscv64Assembler::VFmacc_vv(VRegister vd, VRegister vs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b101100, vm);
EmitR(funct7, vs2, vs1, enum_cast<uint32_t>(VAIEncoding::kOPFVV), vd, 0x57);
}
void Riscv64Assembler::VFmacc_vf(VRegister vd, FRegister fs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b101100, vm);
EmitR(funct7, vs2, fs1, enum_cast<uint32_t>(VAIEncoding::kOPFVF), vd, 0x57);
}
void Riscv64Assembler::VFnmacc_vv(VRegister vd, VRegister vs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b101101, vm);
EmitR(funct7, vs2, vs1, enum_cast<uint32_t>(VAIEncoding::kOPFVV), vd, 0x57);
}
void Riscv64Assembler::VFnmacc_vf(VRegister vd, FRegister fs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b101101, vm);
EmitR(funct7, vs2, fs1, enum_cast<uint32_t>(VAIEncoding::kOPFVF), vd, 0x57);
}
void Riscv64Assembler::VFmsac_vv(VRegister vd, VRegister vs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b101110, vm);
EmitR(funct7, vs2, vs1, enum_cast<uint32_t>(VAIEncoding::kOPFVV), vd, 0x57);
}
void Riscv64Assembler::VFmsac_vf(VRegister vd, FRegister fs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b101110, vm);
EmitR(funct7, vs2, fs1, enum_cast<uint32_t>(VAIEncoding::kOPFVF), vd, 0x57);
}
void Riscv64Assembler::VFnmsac_vv(VRegister vd, VRegister vs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b101111, vm);
EmitR(funct7, vs2, vs1, enum_cast<uint32_t>(VAIEncoding::kOPFVV), vd, 0x57);
}
void Riscv64Assembler::VFnmsac_vf(VRegister vd, FRegister fs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b101111, vm);
EmitR(funct7, vs2, fs1, enum_cast<uint32_t>(VAIEncoding::kOPFVF), vd, 0x57);
}
void Riscv64Assembler::VFwadd_vv(VRegister vd, VRegister vs2, VRegister vs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
DCHECK(vd != vs1);
DCHECK(vd != vs2);
@@ -4541,6 +5307,7 @@ void Riscv64Assembler::VFwadd_vv(VRegister vd, VRegister vs2, VRegister vs1, VM
}
void Riscv64Assembler::VFwadd_vf(VRegister vd, VRegister vs2, FRegister fs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
DCHECK(vd != vs2);
const uint32_t funct7 = EncodeRVVF7(0b110000, vm);
@@ -4548,12 +5315,14 @@ void Riscv64Assembler::VFwadd_vf(VRegister vd, VRegister vs2, FRegister fs1, VM
}
void Riscv64Assembler::VFwredusum_vs(VRegister vd, VRegister vs2, VRegister vs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b110001, vm);
EmitR(funct7, vs2, vs1, enum_cast<uint32_t>(VAIEncoding::kOPFVV), vd, 0x57);
}
void Riscv64Assembler::VFwsub_vv(VRegister vd, VRegister vs2, VRegister vs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
DCHECK(vd != vs1);
DCHECK(vd != vs2);
@@ -4562,6 +5331,7 @@ void Riscv64Assembler::VFwsub_vv(VRegister vd, VRegister vs2, VRegister vs1, VM
}
void Riscv64Assembler::VFwsub_vf(VRegister vd, VRegister vs2, FRegister fs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
DCHECK(vd != vs2);
const uint32_t funct7 = EncodeRVVF7(0b110010, vm);
@@ -4569,11 +5339,13 @@ void Riscv64Assembler::VFwsub_vf(VRegister vd, VRegister vs2, FRegister fs1, VM
}
void Riscv64Assembler::VFwredosum_vs(VRegister vd, VRegister vs2, VRegister vs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVF7(0b110011, vm);
EmitR(funct7, vs2, vs1, enum_cast<uint32_t>(VAIEncoding::kOPFVV), vd, 0x57);
}
void Riscv64Assembler::VFwadd_wv(VRegister vd, VRegister vs2, VRegister vs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
DCHECK(vd != vs1);
const uint32_t funct7 = EncodeRVVF7(0b110100, vm);
@@ -4581,12 +5353,14 @@ void Riscv64Assembler::VFwadd_wv(VRegister vd, VRegister vs2, VRegister vs1, VM
}
void Riscv64Assembler::VFwadd_wf(VRegister vd, VRegister vs2, FRegister fs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b110100, vm);
EmitR(funct7, vs2, fs1, enum_cast<uint32_t>(VAIEncoding::kOPFVF), vd, 0x57);
}
void Riscv64Assembler::VFwsub_wv(VRegister vd, VRegister vs2, VRegister vs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
DCHECK(vd != vs1);
const uint32_t funct7 = EncodeRVVF7(0b110110, vm);
@@ -4594,12 +5368,14 @@ void Riscv64Assembler::VFwsub_wv(VRegister vd, VRegister vs2, VRegister vs1, VM
}
void Riscv64Assembler::VFwsub_wf(VRegister vd, VRegister vs2, FRegister fs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b110110, vm);
EmitR(funct7, vs2, fs1, enum_cast<uint32_t>(VAIEncoding::kOPFVF), vd, 0x57);
}
void Riscv64Assembler::VFwmul_vv(VRegister vd, VRegister vs2, VRegister vs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
DCHECK(vd != vs1);
DCHECK(vd != vs2);
@@ -4608,6 +5384,7 @@ void Riscv64Assembler::VFwmul_vv(VRegister vd, VRegister vs2, VRegister vs1, VM
}
void Riscv64Assembler::VFwmul_vf(VRegister vd, VRegister vs2, FRegister fs1, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
DCHECK(vd != vs2);
const uint32_t funct7 = EncodeRVVF7(0b111000, vm);
@@ -4615,6 +5392,7 @@ void Riscv64Assembler::VFwmul_vf(VRegister vd, VRegister vs2, FRegister fs1, VM
}
void Riscv64Assembler::VFwmacc_vv(VRegister vd, VRegister vs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
DCHECK(vd != vs1);
DCHECK(vd != vs2);
@@ -4623,6 +5401,7 @@ void Riscv64Assembler::VFwmacc_vv(VRegister vd, VRegister vs1, VRegister vs2, VM
}
void Riscv64Assembler::VFwmacc_vf(VRegister vd, FRegister fs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
DCHECK(vd != vs2);
const uint32_t funct7 = EncodeRVVF7(0b111100, vm);
@@ -4630,6 +5409,7 @@ void Riscv64Assembler::VFwmacc_vf(VRegister vd, FRegister fs1, VRegister vs2, VM
}
void Riscv64Assembler::VFwnmacc_vv(VRegister vd, VRegister vs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
DCHECK(vd != vs1);
DCHECK(vd != vs2);
@@ -4638,6 +5418,7 @@ void Riscv64Assembler::VFwnmacc_vv(VRegister vd, VRegister vs1, VRegister vs2, V
}
void Riscv64Assembler::VFwnmacc_vf(VRegister vd, FRegister fs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
DCHECK(vd != vs2);
const uint32_t funct7 = EncodeRVVF7(0b111101, vm);
@@ -4645,6 +5426,7 @@ void Riscv64Assembler::VFwnmacc_vf(VRegister vd, FRegister fs1, VRegister vs2, V
}
void Riscv64Assembler::VFwmsac_vv(VRegister vd, VRegister vs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
DCHECK(vd != vs1);
DCHECK(vd != vs2);
@@ -4653,6 +5435,7 @@ void Riscv64Assembler::VFwmsac_vv(VRegister vd, VRegister vs1, VRegister vs2, VM
}
void Riscv64Assembler::VFwmsac_vf(VRegister vd, FRegister fs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
DCHECK(vd != vs2);
const uint32_t funct7 = EncodeRVVF7(0b111110, vm);
@@ -4660,6 +5443,7 @@ void Riscv64Assembler::VFwmsac_vf(VRegister vd, FRegister fs1, VRegister vs2, VM
}
void Riscv64Assembler::VFwnmsac_vv(VRegister vd, VRegister vs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
DCHECK(vd != vs1);
DCHECK(vd != vs2);
@@ -4668,6 +5452,7 @@ void Riscv64Assembler::VFwnmsac_vv(VRegister vd, VRegister vs1, VRegister vs2, V
}
void Riscv64Assembler::VFwnmsac_vf(VRegister vd, FRegister fs1, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
DCHECK(vd != vs2);
const uint32_t funct7 = EncodeRVVF7(0b111111, vm);
@@ -4675,108 +5460,127 @@ void Riscv64Assembler::VFwnmsac_vf(VRegister vd, FRegister fs1, VRegister vs2, V
}
void Riscv64Assembler::VMv_s_x(VRegister vd, XRegister rs1) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVF7(0b010000, VM::kUnmasked);
EmitR(funct7, 0b00000, rs1, enum_cast<uint32_t>(VAIEncoding::kOPMVX), vd, 0x57);
}
void Riscv64Assembler::VMv_x_s(XRegister rd, VRegister vs2) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVF7(0b010000, VM::kUnmasked);
EmitR(funct7, vs2, 0b00000, enum_cast<uint32_t>(VAIEncoding::kOPMVV), rd, 0x57);
}
void Riscv64Assembler::VCpop_m(XRegister rd, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVF7(0b010000, vm);
EmitR(funct7, vs2, 0b10000, enum_cast<uint32_t>(VAIEncoding::kOPMVV), rd, 0x57);
}
void Riscv64Assembler::VFirst_m(XRegister rd, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVF7(0b010000, vm);
EmitR(funct7, vs2, 0b10001, enum_cast<uint32_t>(VAIEncoding::kOPMVV), rd, 0x57);
}
void Riscv64Assembler::VZext_vf8(VRegister vd, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b010010, vm);
EmitR(funct7, vs2, 0b00010, enum_cast<uint32_t>(VAIEncoding::kOPMVV), vd, 0x57);
}
void Riscv64Assembler::VSext_vf8(VRegister vd, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b010010, vm);
EmitR(funct7, vs2, 0b00011, enum_cast<uint32_t>(VAIEncoding::kOPMVV), vd, 0x57);
}
void Riscv64Assembler::VZext_vf4(VRegister vd, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b010010, vm);
EmitR(funct7, vs2, 0b00100, enum_cast<uint32_t>(VAIEncoding::kOPMVV), vd, 0x57);
}
void Riscv64Assembler::VSext_vf4(VRegister vd, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b010010, vm);
EmitR(funct7, vs2, 0b00101, enum_cast<uint32_t>(VAIEncoding::kOPMVV), vd, 0x57);
}
void Riscv64Assembler::VZext_vf2(VRegister vd, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b010010, vm);
EmitR(funct7, vs2, 0b00110, enum_cast<uint32_t>(VAIEncoding::kOPMVV), vd, 0x57);
}
void Riscv64Assembler::VSext_vf2(VRegister vd, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b010010, vm);
EmitR(funct7, vs2, 0b00111, enum_cast<uint32_t>(VAIEncoding::kOPMVV), vd, 0x57);
}
void Riscv64Assembler::VFmv_s_f(VRegister vd, FRegister fs1) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVF7(0b010000, VM::kUnmasked);
EmitR(funct7, 0b00000, fs1, enum_cast<uint32_t>(VAIEncoding::kOPFVF), vd, 0x57);
}
void Riscv64Assembler::VFmv_f_s(FRegister fd, VRegister vs2) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
const uint32_t funct7 = EncodeRVVF7(0b010000, VM::kUnmasked);
EmitR(funct7, vs2, 0b00000, enum_cast<uint32_t>(VAIEncoding::kOPFVV), fd, 0x57);
}
void Riscv64Assembler::VFcvt_xu_f_v(VRegister vd, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b010010, vm);
EmitR(funct7, vs2, 0b00000, enum_cast<uint32_t>(VAIEncoding::kOPFVV), vd, 0x57);
}
void Riscv64Assembler::VFcvt_x_f_v(VRegister vd, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b010010, vm);
EmitR(funct7, vs2, 0b00001, enum_cast<uint32_t>(VAIEncoding::kOPFVV), vd, 0x57);
}
void Riscv64Assembler::VFcvt_f_xu_v(VRegister vd, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b010010, vm);
EmitR(funct7, vs2, 0b00010, enum_cast<uint32_t>(VAIEncoding::kOPFVV), vd, 0x57);
}
void Riscv64Assembler::VFcvt_f_x_v(VRegister vd, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b010010, vm);
EmitR(funct7, vs2, 0b00011, enum_cast<uint32_t>(VAIEncoding::kOPFVV), vd, 0x57);
}
void Riscv64Assembler::VFcvt_rtz_xu_f_v(VRegister vd, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b010010, vm);
EmitR(funct7, vs2, 0b00110, enum_cast<uint32_t>(VAIEncoding::kOPFVV), vd, 0x57);
}
void Riscv64Assembler::VFcvt_rtz_x_f_v(VRegister vd, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b010010, vm);
EmitR(funct7, vs2, 0b00111, enum_cast<uint32_t>(VAIEncoding::kOPFVV), vd, 0x57);
}
void Riscv64Assembler::VFwcvt_xu_f_v(VRegister vd, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
DCHECK(vd != vs2);
const uint32_t funct7 = EncodeRVVF7(0b010010, vm);
@@ -4784,6 +5588,7 @@ void Riscv64Assembler::VFwcvt_xu_f_v(VRegister vd, VRegister vs2, VM vm) {
}
void Riscv64Assembler::VFwcvt_x_f_v(VRegister vd, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
DCHECK(vd != vs2);
const uint32_t funct7 = EncodeRVVF7(0b010010, vm);
@@ -4791,6 +5596,7 @@ void Riscv64Assembler::VFwcvt_x_f_v(VRegister vd, VRegister vs2, VM vm) {
}
void Riscv64Assembler::VFwcvt_f_xu_v(VRegister vd, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
DCHECK(vd != vs2);
const uint32_t funct7 = EncodeRVVF7(0b010010, vm);
@@ -4798,6 +5604,7 @@ void Riscv64Assembler::VFwcvt_f_xu_v(VRegister vd, VRegister vs2, VM vm) {
}
void Riscv64Assembler::VFwcvt_f_x_v(VRegister vd, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
DCHECK(vd != vs2);
const uint32_t funct7 = EncodeRVVF7(0b010010, vm);
@@ -4805,6 +5612,7 @@ void Riscv64Assembler::VFwcvt_f_x_v(VRegister vd, VRegister vs2, VM vm) {
}
void Riscv64Assembler::VFwcvt_f_f_v(VRegister vd, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
DCHECK(vd != vs2);
const uint32_t funct7 = EncodeRVVF7(0b010010, vm);
@@ -4812,6 +5620,7 @@ void Riscv64Assembler::VFwcvt_f_f_v(VRegister vd, VRegister vs2, VM vm) {
}
void Riscv64Assembler::VFwcvt_rtz_xu_f_v(VRegister vd, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
DCHECK(vd != vs2);
const uint32_t funct7 = EncodeRVVF7(0b010010, vm);
@@ -4819,6 +5628,7 @@ void Riscv64Assembler::VFwcvt_rtz_xu_f_v(VRegister vd, VRegister vs2, VM vm) {
}
void Riscv64Assembler::VFwcvt_rtz_x_f_v(VRegister vd, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
DCHECK(vd != vs2);
const uint32_t funct7 = EncodeRVVF7(0b010010, vm);
@@ -4826,78 +5636,91 @@ void Riscv64Assembler::VFwcvt_rtz_x_f_v(VRegister vd, VRegister vs2, VM vm) {
}
void Riscv64Assembler::VFncvt_xu_f_w(VRegister vd, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b010010, vm);
EmitR(funct7, vs2, 0b10000, enum_cast<uint32_t>(VAIEncoding::kOPFVV), vd, 0x57);
}
void Riscv64Assembler::VFncvt_x_f_w(VRegister vd, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b010010, vm);
EmitR(funct7, vs2, 0b10001, enum_cast<uint32_t>(VAIEncoding::kOPFVV), vd, 0x57);
}
void Riscv64Assembler::VFncvt_f_xu_w(VRegister vd, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b010010, vm);
EmitR(funct7, vs2, 0b10010, enum_cast<uint32_t>(VAIEncoding::kOPFVV), vd, 0x57);
}
void Riscv64Assembler::VFncvt_f_x_w(VRegister vd, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b010010, vm);
EmitR(funct7, vs2, 0b10011, enum_cast<uint32_t>(VAIEncoding::kOPFVV), vd, 0x57);
}
void Riscv64Assembler::VFncvt_f_f_w(VRegister vd, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b010010, vm);
EmitR(funct7, vs2, 0b10100, enum_cast<uint32_t>(VAIEncoding::kOPFVV), vd, 0x57);
}
void Riscv64Assembler::VFncvt_rod_f_f_w(VRegister vd, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b010010, vm);
EmitR(funct7, vs2, 0b10101, enum_cast<uint32_t>(VAIEncoding::kOPFVV), vd, 0x57);
}
void Riscv64Assembler::VFncvt_rtz_xu_f_w(VRegister vd, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b010010, vm);
EmitR(funct7, vs2, 0b10110, enum_cast<uint32_t>(VAIEncoding::kOPFVV), vd, 0x57);
}
void Riscv64Assembler::VFncvt_rtz_x_f_w(VRegister vd, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b010010, vm);
EmitR(funct7, vs2, 0b10111, enum_cast<uint32_t>(VAIEncoding::kOPFVV), vd, 0x57);
}
void Riscv64Assembler::VFsqrt_v(VRegister vd, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b010011, vm);
EmitR(funct7, vs2, 0b00000, enum_cast<uint32_t>(VAIEncoding::kOPFVV), vd, 0x57);
}
void Riscv64Assembler::VFrsqrt7_v(VRegister vd, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b010011, vm);
EmitR(funct7, vs2, 0b00100, enum_cast<uint32_t>(VAIEncoding::kOPFVV), vd, 0x57);
}
void Riscv64Assembler::VFrec7_v(VRegister vd, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b010011, vm);
EmitR(funct7, vs2, 0b00101, enum_cast<uint32_t>(VAIEncoding::kOPFVV), vd, 0x57);
}
void Riscv64Assembler::VFclass_v(VRegister vd, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b010011, vm);
EmitR(funct7, vs2, 0b10000, enum_cast<uint32_t>(VAIEncoding::kOPFVV), vd, 0x57);
}
void Riscv64Assembler::VMsbf_m(VRegister vd, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
DCHECK(vd != vs2);
const uint32_t funct7 = EncodeRVVF7(0b010100, vm);
@@ -4905,6 +5728,7 @@ void Riscv64Assembler::VMsbf_m(VRegister vd, VRegister vs2, VM vm) {
}
void Riscv64Assembler::VMsof_m(VRegister vd, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
DCHECK(vd != vs2);
const uint32_t funct7 = EncodeRVVF7(0b010100, vm);
@@ -4912,6 +5736,7 @@ void Riscv64Assembler::VMsof_m(VRegister vd, VRegister vs2, VM vm) {
}
void Riscv64Assembler::VMsif_m(VRegister vd, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
DCHECK(vd != vs2);
const uint32_t funct7 = EncodeRVVF7(0b010100, vm);
@@ -4919,6 +5744,7 @@ void Riscv64Assembler::VMsif_m(VRegister vd, VRegister vs2, VM vm) {
}
void Riscv64Assembler::VIota_m(VRegister vd, VRegister vs2, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
DCHECK(vd != vs2);
const uint32_t funct7 = EncodeRVVF7(0b010100, vm);
@@ -4926,6 +5752,7 @@ void Riscv64Assembler::VIota_m(VRegister vd, VRegister vs2, VM vm) {
}
void Riscv64Assembler::VId_v(VRegister vd, VM vm) {
+ AssertExtensionsEnabled(Riscv64Extension::kV);
DCHECK_IMPLIES(vm == VM::kV0_t, vd != V0);
const uint32_t funct7 = EncodeRVVF7(0b010100, vm);
EmitR(funct7, V0, 0b10001, enum_cast<uint32_t>(VAIEncoding::kOPMVV), vd, 0x57);
@@ -4952,28 +5779,75 @@ void Riscv64Assembler::Neg(XRegister rd, XRegister rs) { Sub(rd, Zero, rs); }
void Riscv64Assembler::NegW(XRegister rd, XRegister rs) { Subw(rd, Zero, rs); }
void Riscv64Assembler::SextB(XRegister rd, XRegister rs) {
- Slli(rd, rs, kXlen - 8u);
- Srai(rd, rd, kXlen - 8u);
+ if (IsExtensionEnabled(Riscv64Extension::kZbb)) {
+ if (IsExtensionEnabled(Riscv64Extension::kZcb) && rd == rs && IsShortReg(rd)) {
+ CSextB(rd);
+ } else {
+ ZbbSextB(rd, rs);
+ }
+ } else {
+ Slli(rd, rs, kXlen - 8u);
+ Srai(rd, rd, kXlen - 8u);
+ }
}
void Riscv64Assembler::SextH(XRegister rd, XRegister rs) {
- Slli(rd, rs, kXlen - 16u);
- Srai(rd, rd, kXlen - 16u);
+ if (IsExtensionEnabled(Riscv64Extension::kZbb)) {
+ if (IsExtensionEnabled(Riscv64Extension::kZcb) && rd == rs && IsShortReg(rd)) {
+ CSextH(rd);
+ } else {
+ ZbbSextH(rd, rs);
+ }
+ } else {
+ Slli(rd, rs, kXlen - 16u);
+ Srai(rd, rd, kXlen - 16u);
+ }
}
-void Riscv64Assembler::SextW(XRegister rd, XRegister rs) { Addiw(rd, rs, 0); }
+void Riscv64Assembler::SextW(XRegister rd, XRegister rs) {
+ if (IsExtensionEnabled(Riscv64Extension::kZca) && rd != Zero && (rd == rs || rs == Zero)) {
+ if (rd == rs) {
+ CAddiw(rd, 0);
+ } else {
+ CLi(rd, 0);
+ }
+ } else {
+ Addiw(rd, rs, 0);
+ }
+}
-void Riscv64Assembler::ZextB(XRegister rd, XRegister rs) { Andi(rd, rs, 0xff); }
+void Riscv64Assembler::ZextB(XRegister rd, XRegister rs) {
+ if (IsExtensionEnabled(Riscv64Extension::kZcb) && rd == rs && IsShortReg(rd)) {
+ CZextB(rd);
+ } else {
+ Andi(rd, rs, 0xff);
+ }
+}
void Riscv64Assembler::ZextH(XRegister rd, XRegister rs) {
- Slli(rd, rs, kXlen - 16u);
- Srli(rd, rd, kXlen - 16u);
+ if (IsExtensionEnabled(Riscv64Extension::kZbb)) {
+ if (IsExtensionEnabled(Riscv64Extension::kZcb) && rd == rs && IsShortReg(rd)) {
+ CZextH(rd);
+ } else {
+ ZbbZextH(rd, rs);
+ }
+ } else {
+ Slli(rd, rs, kXlen - 16u);
+ Srli(rd, rd, kXlen - 16u);
+ }
}
void Riscv64Assembler::ZextW(XRegister rd, XRegister rs) {
- // TODO(riscv64): Use the ZEXT.W alias for ADD.UW from the Zba extension.
- Slli(rd, rs, kXlen - 32u);
- Srli(rd, rd, kXlen - 32u);
+ if (IsExtensionEnabled(Riscv64Extension::kZba)) {
+ if (IsExtensionEnabled(Riscv64Extension::kZcb) && rd == rs && IsShortReg(rd)) {
+ CZextW(rd);
+ } else {
+ AddUw(rd, rs, Zero);
+ }
+ } else {
+ Slli(rd, rs, kXlen - 32u);
+ Srli(rd, rd, kXlen - 32u);
+ }
}
void Riscv64Assembler::Seqz(XRegister rd, XRegister rs) { Sltiu(rd, rs, 1); }
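Taken together, the rewritten sign/zero-extension pseudo-instructions now pick
the shortest encoding the enabled extensions allow. An illustrative lowering of
`ZextW(A0, A0)`, inferred from the branches above (mnemonics taken from the
"Zba"/"Zcb" specifications):

    // Zba and Zcb enabled, destination is a "short" register and rd == rs:
    //     c.zext.w  a0
    // Zba only:
    //     add.uw    a0, a0, zero
    // Neither:
    //     slli      a0, a0, 32
    //     srli      a0, a0, 32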
diff --git a/compiler/utils/riscv64/assembler_riscv64.h b/compiler/utils/riscv64/assembler_riscv64.h
index 1696251bf6..40c63813ea 100644
--- a/compiler/utils/riscv64/assembler_riscv64.h
+++ b/compiler/utils/riscv64/assembler_riscv64.h
@@ -41,6 +41,45 @@ static constexpr size_t kRiscv64WordSize = 4;
static constexpr size_t kRiscv64DoublewordSize = 8;
static constexpr size_t kRiscv64FloatRegSizeInBytes = 8;
+// The `Riscv64Extension` enumeration is used for restricting the instructions that the assembler
+// can use. Some restrictions are checked only in debug mode (for example, load and store
+// instructions check `kLoadStore`); other restrictions are checked at run time and affect the
+// emitted code (for example, the `SextB()` pseudo-instruction selects among an implementation
+// from "Zcb", one from "Zbb", and a two-instruction sequence from the basic instruction set).
+enum class Riscv64Extension : uint32_t {
+ kLoadStore, // Pseudo-extension encompassing all loads and stores. Used to check that
+ // we do not have loads and stores in the middle of a LR/SC sequence.
+ kZifencei,
+ kM,
+ kA,
+ kZicsr,
+ kF,
+ kD,
+ kZba,
+ kZbb,
+ kZbs, // TODO(riscv64): Implement "Zbs" instructions.
+ kV,
+ kZca, // "C" extension instructions except floating point loads/stores.
+ kZcd, // "C" extension double loads/stores.
+ // Note: RV64 cannot implement Zcf ("C" extension float loads/stores).
+ kZcb, // Simple 16-bit operations not present in the original "C" extension.
+
+ kLast = kZcb
+};
+
+using Riscv64ExtensionMask = uint32_t;
+
+constexpr Riscv64ExtensionMask Riscv64ExtensionBit(Riscv64Extension ext) {
+ return 1u << enum_cast<>(ext);
+}
+
+constexpr Riscv64ExtensionMask kRiscv64AllExtensionsMask =
+ MaxInt<Riscv64ExtensionMask>(enum_cast<>(Riscv64Extension::kLast) + 1);
+
+// Extensions allowed in a LR/SC sequence (between the LR and SC).
+constexpr Riscv64ExtensionMask kRiscv64LrScSequenceExtensionsMask =
+ Riscv64ExtensionBit(Riscv64Extension::kZca);
+
enum class FPRoundingMode : uint32_t {
kRNE = 0x0, // Round to Nearest, ties to Even
kRTZ = 0x1, // Round towards Zero
@@ -175,6 +214,12 @@ class Riscv64Assembler final : public Assembler {
public:
explicit Riscv64Assembler(ArenaAllocator* allocator,
const Riscv64InstructionSetFeatures* instruction_set_features = nullptr)
+ : Riscv64Assembler(allocator,
+ instruction_set_features != nullptr
+ ? ConvertExtensions(instruction_set_features)
+ : kRiscv64AllExtensionsMask) {}
+
+ Riscv64Assembler(ArenaAllocator* allocator, Riscv64ExtensionMask enabled_extensions)
: Assembler(allocator),
branches_(allocator->Adapter(kArenaAllocAssembler)),
finalized_(false),
@@ -186,9 +231,9 @@ class Riscv64Assembler final : public Assembler {
last_position_adjustment_(0),
last_old_position_(0),
last_branch_id_(0),
+ enabled_extensions_(enabled_extensions),
available_scratch_core_registers_((1u << TMP) | (1u << TMP2)),
available_scratch_fp_registers_(1u << FTMP) {
- UNUSED(instruction_set_features);
cfi().DelayEmittingAdvancePCs();
}
@@ -201,6 +246,10 @@ class Riscv64Assembler final : public Assembler {
size_t CodeSize() const override { return Assembler::CodeSize(); }
DebugFrameOpCodeWriterForAssembler& cfi() { return Assembler::cfi(); }
+ bool IsExtensionEnabled(Riscv64Extension ext) {
+ return (enabled_extensions_ & Riscv64ExtensionBit(ext)) != 0u;
+ }
+
// According to "The RISC-V Instruction Set Manual"
// LUI/AUIPC (RV32I, with sign-extension on RV64I), opcode = 0x17, 0x37
@@ -530,13 +579,15 @@ class Riscv64Assembler final : public Assembler {
void CLh(XRegister rd_s, XRegister rs1_s, int32_t offset);
void CSb(XRegister rd_s, XRegister rs1_s, int32_t offset);
void CSh(XRegister rd_s, XRegister rs1_s, int32_t offset);
- void CZext_b(XRegister rd_rs1_s);
- void CSext_b(XRegister rd_rs1_s);
- void CZext_h(XRegister rd_rs1_s);
- void CSext_h(XRegister rd_rs1_s);
- void CZext_w(XRegister rd_rs1_s);
+ void CZextB(XRegister rd_rs1_s);
+ void CSextB(XRegister rd_rs1_s);
+ void CZextH(XRegister rd_rs1_s);
+ void CSextH(XRegister rd_rs1_s);
+ void CZextW(XRegister rd_rs1_s);
void CNot(XRegister rd_rs1_s);
void CMul(XRegister rd_s, XRegister rs2_s);
+ // "Zcb" Standard Extension End; resume "C" Standard Extension.
+ // TODO(riscv64): Reorder "Zcb" after remaining "C" instructions.
void CJ(int32_t offset);
void CJr(XRegister rs1);
@@ -1841,6 +1892,38 @@ class Riscv64Assembler final : public Assembler {
uint32_t GetAdjustedPosition(uint32_t old_position);
private:
+ static uint32_t ConvertExtensions(
+ const Riscv64InstructionSetFeatures* instruction_set_features) {
+ // The `Riscv64InstructionSetFeatures` currently does not support "Zcb",
+ // only the original "C" extension. For riscv64 that means "Zca" and "Zcd".
+ constexpr Riscv64ExtensionMask kCompressedExtensionsMask =
+ Riscv64ExtensionBit(Riscv64Extension::kZca) | Riscv64ExtensionBit(Riscv64Extension::kZcd);
+ return
+ (Riscv64ExtensionBit(Riscv64Extension::kLoadStore)) |
+ (Riscv64ExtensionBit(Riscv64Extension::kZifencei)) |
+ (Riscv64ExtensionBit(Riscv64Extension::kM)) |
+ (Riscv64ExtensionBit(Riscv64Extension::kA)) |
+ (Riscv64ExtensionBit(Riscv64Extension::kZicsr)) |
+ (Riscv64ExtensionBit(Riscv64Extension::kF)) |
+ (Riscv64ExtensionBit(Riscv64Extension::kD)) |
+ (instruction_set_features->HasZba() ? Riscv64ExtensionBit(Riscv64Extension::kZba) : 0u) |
+ (instruction_set_features->HasZbb() ? Riscv64ExtensionBit(Riscv64Extension::kZbb) : 0u) |
+ (instruction_set_features->HasZbs() ? Riscv64ExtensionBit(Riscv64Extension::kZbs) : 0u) |
+ (instruction_set_features->HasVector() ? Riscv64ExtensionBit(Riscv64Extension::kV) : 0u) |
+ (instruction_set_features->HasCompressed() ? kCompressedExtensionsMask : 0u);
+ }
+
+ void AssertExtensionsEnabled(Riscv64Extension ext) {
+ DCHECK(IsExtensionEnabled(ext))
+ << "ext=" << enum_cast<>(ext) << " enabled=0x" << std::hex << enabled_extensions_;
+ }
+
+ template <typename... OtherExt>
+ void AssertExtensionsEnabled(Riscv64Extension ext, OtherExt... other_ext) {
+ AssertExtensionsEnabled(ext);
+ AssertExtensionsEnabled(other_ext...);
+ }
+
enum BranchCondition : uint8_t {
kCondEQ,
kCondNE,
@@ -2610,16 +2693,53 @@ class Riscv64Assembler final : public Assembler {
uint32_t last_old_position_;
uint32_t last_branch_id_;
+ Riscv64ExtensionMask enabled_extensions_;
uint32_t available_scratch_core_registers_;
uint32_t available_scratch_fp_registers_;
static constexpr uint32_t kXlen = 64;
+ friend class ScopedExtensionsOverride;
friend class ScratchRegisterScope;
DISALLOW_COPY_AND_ASSIGN(Riscv64Assembler);
};
+class ScopedExtensionsOverride {
+ public:
+ ScopedExtensionsOverride(Riscv64Assembler* assembler, Riscv64ExtensionMask enabled_extensions)
+ : assembler_(assembler),
+ old_enabled_extensions_(assembler->enabled_extensions_) {
+ assembler->enabled_extensions_ = enabled_extensions;
+ }
+
+ ~ScopedExtensionsOverride() {
+ assembler_->enabled_extensions_ = old_enabled_extensions_;
+ }
+
+ protected:
+ static Riscv64ExtensionMask GetEnabledExtensions(Riscv64Assembler* assembler) {
+ return assembler->enabled_extensions_;
+ }
+
+ private:
+ Riscv64Assembler* const assembler_;
+ const Riscv64ExtensionMask old_enabled_extensions_;
+};
+
+template <Riscv64ExtensionMask kMask>
+class ScopedExtensionsRestriction : public ScopedExtensionsOverride {
+ public:
+ explicit ScopedExtensionsRestriction(Riscv64Assembler* assembler)
+ : ScopedExtensionsOverride(assembler, GetEnabledExtensions(assembler) & kMask) {}
+};
+
+template <Riscv64ExtensionMask kMask>
+using ScopedExtensionsExclusion = ScopedExtensionsRestriction<~kMask>;
+
+using ScopedLrScExtensionsRestriction =
+ ScopedExtensionsRestriction<kRiscv64LrScSequenceExtensionsMask>;
+
class ScratchRegisterScope {
public:
explicit ScratchRegisterScope(Riscv64Assembler* assembler)
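
The `ScopedExtensionsRestriction` / `ScopedExtensionsExclusion` pair above is a small RAII override of the assembler's extension mask: a restriction ANDs the current mask with `kMask`, an exclusion is simply a restriction with the complemented mask, and the destructor restores the previous mask. A minimal sketch of the mechanism (simplified names, not the real interface):

    #include <cassert>
    #include <cstdint>

    struct FakeAsm { uint32_t enabled_extensions; };

    class ScopedRestriction {
     public:
      ScopedRestriction(FakeAsm* a, uint32_t mask)
          : asm_(a), old_(a->enabled_extensions) {
        a->enabled_extensions &= mask;  // Keep only extensions allowed by `mask`.
      }
      ~ScopedRestriction() { asm_->enabled_extensions = old_; }  // Restore on scope exit.

     private:
      FakeAsm* const asm_;
      const uint32_t old_;
    };

    int main() {
      FakeAsm a{0b1111u};
      {
        ScopedRestriction keep_bit0(&a, 0b0001u);   // Restriction: AND with the mask.
        assert(a.enabled_extensions == 0b0001u);
        ScopedRestriction drop_bit0(&a, ~0b0001u);  // Exclusion: AND with the complement.
        assert(a.enabled_extensions == 0u);
      }
      assert(a.enabled_extensions == 0b1111u);      // Both overrides were unwound.
    }
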
diff --git a/compiler/utils/riscv64/assembler_riscv64_test.cc b/compiler/utils/riscv64/assembler_riscv64_test.cc
index 87c7641576..a327987419 100644
--- a/compiler/utils/riscv64/assembler_riscv64_test.cc
+++ b/compiler/utils/riscv64/assembler_riscv64_test.cc
@@ -28,6 +28,11 @@
namespace art HIDDEN {
namespace riscv64 {
+constexpr Riscv64ExtensionMask kRiscv64CompressedExtensionsMask =
+ Riscv64ExtensionBit(Riscv64Extension::kZca) |
+ Riscv64ExtensionBit(Riscv64Extension::kZcd) |
+ Riscv64ExtensionBit(Riscv64Extension::kZcb);
+
struct RISCV64CpuRegisterCompare {
bool operator()(const XRegister& a, const XRegister& b) const { return a < b; }
};
@@ -42,12 +47,11 @@ class AssemblerRISCV64Test : public AssemblerTest<Riscv64Assembler,
using Base =
AssemblerTest<Riscv64Assembler, Riscv64Label, XRegister, FRegister, int32_t, VRegister>;
- AssemblerRISCV64Test()
- : instruction_set_features_(Riscv64InstructionSetFeatures::FromVariant("generic", nullptr)) {}
+ AssemblerRISCV64Test() {}
protected:
Riscv64Assembler* CreateAssembler(ArenaAllocator* allocator) override {
- return new (allocator) Riscv64Assembler(allocator, instruction_set_features_.get());
+ return new (allocator) Riscv64Assembler(allocator, kRiscv64AllExtensionsMask);
}
InstructionSet GetIsa() override { return InstructionSet::kRiscv64; }
@@ -71,10 +75,40 @@ class AssemblerRISCV64Test : public AssemblerTest<Riscv64Assembler,
class ScopedCSuppression {
public:
explicit ScopedCSuppression(AssemblerRISCV64Test* test)
- : smo_(test, "-march=rv64imafdv_zba_zbb") {}
+ : smo_(test, "-march=rv64imafdv_zba_zbb"),
+ exclusion_(test->GetAssembler()) {}
+
+ private:
+ ScopedMarchOverride smo_;
+ ScopedExtensionsExclusion<kRiscv64CompressedExtensionsMask> exclusion_;
+ };
+
+ class ScopedZbaAndCSuppression {
+ public:
+ explicit ScopedZbaAndCSuppression(AssemblerRISCV64Test* test)
+ : smo_(test, "-march=rv64imafdv_zbb"),
+ exclusion_(test->GetAssembler()) {}
+
+ private:
+ static constexpr Riscv64ExtensionMask kExcludedExtensions =
+ Riscv64ExtensionBit(Riscv64Extension::kZba) | kRiscv64CompressedExtensionsMask;
+
+ ScopedMarchOverride smo_;
+ ScopedExtensionsExclusion<kExcludedExtensions> exclusion_;
+ };
+
+ class ScopedZbbAndCSuppression {
+ public:
+ explicit ScopedZbbAndCSuppression(AssemblerRISCV64Test* test)
+ : smo_(test, "-march=rv64imafdv_zba"),
+ exclusion_(test->GetAssembler()) {}
private:
+ static constexpr Riscv64ExtensionMask kExcludedExtensions =
+ Riscv64ExtensionBit(Riscv64Extension::kZbb) | kRiscv64CompressedExtensionsMask;
+
ScopedMarchOverride smo_;
+ ScopedExtensionsExclusion<kExcludedExtensions> exclusion_;
};
std::vector<std::string> GetAssemblerCommand() override {
@@ -2186,7 +2220,6 @@ class AssemblerRISCV64Test : public AssemblerTest<Riscv64Assembler,
std::map<XRegister, std::string, RISCV64CpuRegisterCompare> secondary_register_names_;
- std::unique_ptr<const Riscv64InstructionSetFeatures> instruction_set_features_;
std::optional<std::string> march_override_;
};
@@ -3419,24 +3452,24 @@ TEST_F(AssemblerRISCV64Test, CSh) {
"CSh");
}
-TEST_F(AssemblerRISCV64Test, CZext_b) {
- DriverStr(RepeatCRShort(&Riscv64Assembler::CZext_b, "c.zext.b {reg}"), "CZext_b");
+TEST_F(AssemblerRISCV64Test, CZextB) {
+ DriverStr(RepeatCRShort(&Riscv64Assembler::CZextB, "c.zext.b {reg}"), "CZextB");
}
-TEST_F(AssemblerRISCV64Test, CSext_b) {
- DriverStr(RepeatCRShort(&Riscv64Assembler::CSext_b, "c.sext.b {reg}"), "CSext_b");
+TEST_F(AssemblerRISCV64Test, CSextB) {
+ DriverStr(RepeatCRShort(&Riscv64Assembler::CSextB, "c.sext.b {reg}"), "CSextB");
}
-TEST_F(AssemblerRISCV64Test, CZext_h) {
- DriverStr(RepeatCRShort(&Riscv64Assembler::CZext_h, "c.zext.h {reg}"), "CZext_h");
+TEST_F(AssemblerRISCV64Test, CZextH) {
+ DriverStr(RepeatCRShort(&Riscv64Assembler::CZextH, "c.zext.h {reg}"), "CZextH");
}
-TEST_F(AssemblerRISCV64Test, CSext_h) {
- DriverStr(RepeatCRShort(&Riscv64Assembler::CSext_h, "c.sext.h {reg}"), "CSext_h");
+TEST_F(AssemblerRISCV64Test, CSextH) {
+ DriverStr(RepeatCRShort(&Riscv64Assembler::CSextH, "c.sext.h {reg}"), "CSextH");
}
-TEST_F(AssemblerRISCV64Test, CZext_w) {
- DriverStr(RepeatCRShort(&Riscv64Assembler::CZext_w, "c.zext.w {reg}"), "CZext_w");
+TEST_F(AssemblerRISCV64Test, CZextW) {
+ DriverStr(RepeatCRShort(&Riscv64Assembler::CZextW, "c.zext.w {reg}"), "CZextW");
}
TEST_F(AssemblerRISCV64Test, CNot) {
@@ -7875,48 +7908,81 @@ TEST_F(AssemblerRISCV64Test, NegW) {
}
TEST_F(AssemblerRISCV64Test, SextB) {
+ DriverStr(RepeatRR(&Riscv64Assembler::SextB, "sext.b {reg1}, {reg2}\n"), "SextB");
+}
+
+TEST_F(AssemblerRISCV64Test, SextB_WithoutC) {
ScopedCSuppression scs(this);
- // Note: SEXT.B from the Zbb extension is not supported.
- DriverStr(RepeatRR(&Riscv64Assembler::SextB,
- "slli {reg1}, {reg2}, 56\n"
- "srai {reg1}, {reg1}, 56"),
- "SextB");
+ DriverStr(RepeatRR(&Riscv64Assembler::SextB, "sext.b {reg1}, {reg2}\n"), "SextB_WithoutC");
+}
+
+// TODO: Add test `SextB_WithoutZbb` when `Slli()` and `Srai()` auto-forward to 16-bit functions.
+TEST_F(AssemblerRISCV64Test, SextB_WithoutZbbAndC) {
+ ScopedZbbAndCSuppression scs(this);
+ DriverStr(RepeatRR(&Riscv64Assembler::SextB, "sext.b {reg1}, {reg2}\n"), "SextB_WithoutZbbAndC");
}
TEST_F(AssemblerRISCV64Test, SextH) {
+ DriverStr(RepeatRR(&Riscv64Assembler::SextH, "sext.h {reg1}, {reg2}\n"), "SextH");
+}
+
+TEST_F(AssemblerRISCV64Test, SextH_WithoutC) {
ScopedCSuppression scs(this);
- // Note: SEXT.H from the Zbb extension is not supported.
- DriverStr(RepeatRR(&Riscv64Assembler::SextH,
- "slli {reg1}, {reg2}, 48\n"
- "srai {reg1}, {reg1}, 48"),
- "SextH");
+ DriverStr(RepeatRR(&Riscv64Assembler::SextH, "sext.h {reg1}, {reg2}\n"), "SextH_WithoutC");
+}
+
+// TODO: Add test `SextH_WithoutZbb` when `Slli()` and `Srai()` auto-forward to 16-bit functions.
+TEST_F(AssemblerRISCV64Test, SextH_WithoutZbbAndC) {
+ ScopedZbbAndCSuppression scs(this);
+ DriverStr(RepeatRR(&Riscv64Assembler::SextH, "sext.h {reg1}, {reg2}\n"), "SextH_WithoutZbbAndC");
}
TEST_F(AssemblerRISCV64Test, SextW) {
+ DriverStr(RepeatRR(&Riscv64Assembler::SextW, "sext.w {reg1}, {reg2}\n"), "SextW");
+}
+
+TEST_F(AssemblerRISCV64Test, SextW_WithoutC) {
ScopedCSuppression scs(this);
- DriverStr(RepeatRR(&Riscv64Assembler::SextW, "addiw {reg1}, {reg2}, 0\n"), "SextW");
+ DriverStr(RepeatRR(&Riscv64Assembler::SextW, "sext.w {reg1}, {reg2}\n"), "SextW_WithoutC");
}
TEST_F(AssemblerRISCV64Test, ZextB) {
+ DriverStr(RepeatRR(&Riscv64Assembler::ZextB, "zext.b {reg1}, {reg2}"), "ZextB");
+}
+
+TEST_F(AssemblerRISCV64Test, ZextB_WithoutC) {
ScopedCSuppression scs(this);
- DriverStr(RepeatRR(&Riscv64Assembler::ZextB, "andi {reg1}, {reg2}, 255"), "ZextB");
+ DriverStr(RepeatRR(&Riscv64Assembler::ZextB, "zext.b {reg1}, {reg2}"), "ZextB_WithoutC");
}
TEST_F(AssemblerRISCV64Test, ZextH) {
+ DriverStr(RepeatRR(&Riscv64Assembler::ZextH, "zext.h {reg1}, {reg2}\n"), "ZextH");
+}
+
+TEST_F(AssemblerRISCV64Test, ZextH_WithoutC) {
ScopedCSuppression scs(this);
- // Note: ZEXT.H from the Zbb extension is not supported.
- DriverStr(RepeatRR(&Riscv64Assembler::ZextH,
- "slli {reg1}, {reg2}, 48\n"
- "srli {reg1}, {reg1}, 48"),
- "SextH");
+ DriverStr(RepeatRR(&Riscv64Assembler::ZextH, "zext.h {reg1}, {reg2}\n"), "ZextH_WithoutC");
+}
+
+// TODO: Add test `ZextH_WithoutZbb` when `Slli()` and `Srli()` auto-forward to 16-bit functions.
+TEST_F(AssemblerRISCV64Test, ZextH_WithoutZbbAndC) {
+ ScopedZbbAndCSuppression scs(this);
+ DriverStr(RepeatRR(&Riscv64Assembler::ZextH, "zext.h {reg1}, {reg2}\n"), "ZextH_WithoutZbbAndC");
}
TEST_F(AssemblerRISCV64Test, ZextW) {
+ DriverStr(RepeatRR(&Riscv64Assembler::ZextW, "zext.w {reg1}, {reg2}\n"), "ZextW");
+}
+
+TEST_F(AssemblerRISCV64Test, ZextW_WithoutC) {
ScopedCSuppression scs(this);
- DriverStr(RepeatRR(&Riscv64Assembler::ZextW,
- "slli {reg1}, {reg2}, 32\n"
- "srli {reg1}, {reg1}, 32"),
- "ZextW");
+ DriverStr(RepeatRR(&Riscv64Assembler::ZextW, "zext.w {reg1}, {reg2}\n"), "ZextW_WithoutC");
+}
+
+// TODO: Add test `ZextW_WithoutZba` when `Slli()` and `Srli()` auto-forward to 16-bit functions.
+TEST_F(AssemblerRISCV64Test, ZextW_WithoutZbaAndC) {
+ ScopedZbaAndCSuppression scs(this);
+ DriverStr(RepeatRR(&Riscv64Assembler::ZextW, "zext.w {reg1}, {reg2}\n"), "ZextW_WithoutZbaAndC");
}
TEST_F(AssemblerRISCV64Test, Seqz) {
diff --git a/compiler/utils/riscv64/jni_macro_assembler_riscv64.cc b/compiler/utils/riscv64/jni_macro_assembler_riscv64.cc
index 00e1f54d03..e2ef9c4b7f 100644
--- a/compiler/utils/riscv64/jni_macro_assembler_riscv64.cc
+++ b/compiler/utils/riscv64/jni_macro_assembler_riscv64.cc
@@ -472,10 +472,13 @@ void Riscv64JNIMacroAssembler::TryToTransitionFromRunnableToNative(
__ Bind(&retry);
static_assert(thread_flags_offset.Int32Value() == 0); // LR/SC require exact address.
__ LrW(scratch, TR, AqRl::kNone);
- __ Li(scratch2, kNativeStateValue);
- // If any flags are set, go to the slow path.
- static_assert(kRunnableStateValue == 0u);
- __ Bnez(scratch, Riscv64JNIMacroLabel::Cast(label)->AsRiscv64());
+ {
+ ScopedLrScExtensionsRestriction slser(&asm_);
+ __ Li(scratch2, kNativeStateValue);
+ // If any flags are set, go to the slow path.
+ static_assert(kRunnableStateValue == 0u);
+ __ Bnez(scratch, Riscv64JNIMacroLabel::Cast(label)->AsRiscv64());
+ }
__ ScW(scratch, scratch2, TR, AqRl::kRelease);
__ Bnez(scratch, &retry);
@@ -506,11 +509,14 @@ void Riscv64JNIMacroAssembler::TryToTransitionFromNativeToRunnable(
__ Bind(&retry);
static_assert(thread_flags_offset.Int32Value() == 0); // LR/SC require exact address.
__ LrW(scratch, TR, AqRl::kAcquire);
- __ Li(scratch2, kNativeStateValue);
- // If any flags are set, or the state is not Native, go to the slow path.
- // (While the thread can theoretically transition between different Suspended states,
- // it would be very unexpected to see a state other than Native at this point.)
- __ Bne(scratch, scratch2, Riscv64JNIMacroLabel::Cast(label)->AsRiscv64());
+ {
+ ScopedLrScExtensionsRestriction slser(&asm_);
+ __ Li(scratch2, kNativeStateValue);
+ // If any flags are set, or the state is not Native, go to the slow path.
+ // (While the thread can theoretically transition between different Suspended states,
+ // it would be very unexpected to see a state other than Native at this point.)
+ __ Bne(scratch, scratch2, Riscv64JNIMacroLabel::Cast(label)->AsRiscv64());
+ }
static_assert(kRunnableStateValue == 0u);
__ ScW(scratch, Zero, TR, AqRl::kNone);
__ Bnez(scratch, &retry);
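
For context on the two hunks above: RISC-V only guarantees forward progress for an LR/SC loop when the code between the reservation and the store-conditional is constrained, and in particular contains no other loads or stores. `kRiscv64LrScSequenceExtensionsMask` keeps only `kZca`, so inside `ScopedLrScExtensionsRestriction` the base-ISA `Li` and `Bnez` (and their compressed forms) remain available while the pseudo-extension `kLoadStore` is masked out, making any stray load or store trip the debug assertion. A toy illustration, with `assert` standing in for `AssertExtensionsEnabled()` and all names illustrative:

    #include <cassert>
    #include <cstdint>

    constexpr uint32_t kLoadStore = 1u << 0;  // Pseudo-extension: loads/stores allowed.
    constexpr uint32_t kZca = 1u << 1;

    struct FakeAsm {
      uint32_t enabled = kLoadStore | kZca;
      void Lw() { assert((enabled & kLoadStore) != 0u && "load inside LR/SC sequence"); }
    };

    int main() {
      FakeAsm a;
      a.Lw();                       // Fine outside the sequence.
      const uint32_t saved = a.enabled;
      a.enabled &= kZca;            // ScopedLrScExtensionsRestriction in effect.
      // a.Lw();                    // Would assert: no loads between LR and SC.
      a.enabled = saved;            // Scope exit restores the mask.
      a.Lw();                       // Allowed again.
    }
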
diff --git a/dex2oat/art_standalone_dex2oat_cts_tests.xml b/dex2oat/art_standalone_dex2oat_cts_tests.xml
index 254f11d3d7..b6f4a3d5f2 100644
--- a/dex2oat/art_standalone_dex2oat_cts_tests.xml
+++ b/dex2oat/art_standalone_dex2oat_cts_tests.xml
@@ -54,8 +54,6 @@
<test class="com.android.tradefed.testtype.GTest" >
<option name="native-test-device-path" value="/data/local/tmp/art_standalone_dex2oat_cts_tests" />
<option name="module-name" value="art_standalone_dex2oat_cts_tests" />
- <option name="ld-library-path-32" value="/apex/com.android.art/lib" />
- <option name="ld-library-path-64" value="/apex/com.android.art/lib64" />
</test>
<!-- When this test is run in a Mainline context (e.g. with `mts-tradefed`), only enable it if
diff --git a/dex2oat/art_standalone_dex2oat_tests.xml b/dex2oat/art_standalone_dex2oat_tests.xml
index 7b53f1f101..d0c3f40738 100644
--- a/dex2oat/art_standalone_dex2oat_tests.xml
+++ b/dex2oat/art_standalone_dex2oat_tests.xml
@@ -73,8 +73,6 @@
<option name="test-case-timeout" value="5m" />
<option name="native-test-device-path" value="/data/local/tmp/art_standalone_dex2oat_tests" />
<option name="module-name" value="art_standalone_dex2oat_tests" />
- <option name="ld-library-path-32" value="/apex/com.android.art/lib" />
- <option name="ld-library-path-64" value="/apex/com.android.art/lib64" />
</test>
<!-- When this test is run in a Mainline context (e.g. with `mts-tradefed`), only enable it if
diff --git a/dexoptanalyzer/art_standalone_dexoptanalyzer_tests.xml b/dexoptanalyzer/art_standalone_dexoptanalyzer_tests.xml
index c04deaa488..07e5ed6c27 100644
--- a/dexoptanalyzer/art_standalone_dexoptanalyzer_tests.xml
+++ b/dexoptanalyzer/art_standalone_dexoptanalyzer_tests.xml
@@ -54,8 +54,6 @@
<test class="com.android.tradefed.testtype.GTest" >
<option name="native-test-device-path" value="/data/local/tmp/art_standalone_dexoptanalyzer_tests" />
<option name="module-name" value="art_standalone_dexoptanalyzer_tests" />
- <option name="ld-library-path-32" value="/apex/com.android.art/lib" />
- <option name="ld-library-path-64" value="/apex/com.android.art/lib64" />
</test>
<!-- When this test is run in a Mainline context (e.g. with `mts-tradefed`), only enable it if
diff --git a/libartservice/service/Android.bp b/libartservice/service/Android.bp
index efd0f10d2d..652d49d50e 100644
--- a/libartservice/service/Android.bp
+++ b/libartservice/service/Android.bp
@@ -32,7 +32,6 @@ cc_defaults {
],
export_include_dirs: ["native"],
shared_libs: [
- "libarttools", // Contains "libc++fs".
"libbase",
"libnativehelper",
],
@@ -48,6 +47,7 @@ cc_library {
shared_libs: [
"libart",
"libartbase",
+ "libarttools", // Contains "libc++fs".
],
}
@@ -64,6 +64,7 @@ cc_library {
shared_libs: [
"libartd",
"libartbased",
+ "libarttools", // Contains "libc++fs".
],
}
@@ -198,6 +199,9 @@ art_cc_test {
"art_gtest_defaults",
"art_libartservice_tests_defaults",
],
+ shared_libs: [
+ "libarttools",
+ ],
}
// Standalone version of ART gtest `art_libartservice_tests`, not bundled with the ART APEX on
@@ -208,6 +212,9 @@ art_cc_test {
"art_standalone_gtest_defaults",
"art_libartservice_tests_defaults",
],
+ static_libs: [
+ "libarttools",
+ ],
}
android_test {
diff --git a/libartservice/service/java/com/android/server/art/ReasonMapping.java b/libartservice/service/java/com/android/server/art/ReasonMapping.java
index 7c64abfab8..2a1c81ba7f 100644
--- a/libartservice/service/java/com/android/server/art/ReasonMapping.java
+++ b/libartservice/service/java/com/android/server/art/ReasonMapping.java
@@ -188,13 +188,14 @@ public class ReasonMapping {
/**
* Loads the concurrency from the system property, for batch dexopt ({@link
- * ArtManagerLocal#dexoptPackages}), or 1 if the system property is not found or cannot be
- * parsed.
+ * ArtManagerLocal#dexoptPackages}). The default is tuned to strike a good balance between
+ * device load and dexopt coverage, depending on the situation.
*
* @hide
*/
public static int getConcurrencyForReason(@NonNull @BatchDexoptReason String reason) {
return SystemProperties.getInt("persist.device_config.runtime." + reason + "_concurrency",
- SystemProperties.getInt("pm.dexopt." + reason + ".concurrency", 1 /* def */));
+ SystemProperties.getInt("pm.dexopt." + reason + ".concurrency",
+ reason.equals(REASON_BG_DEXOPT) ? 4 : 1 /* def */));
}
}
diff --git a/libarttools/Android.bp b/libarttools/Android.bp
index 5997e5d3c1..2c036fb96b 100644
--- a/libarttools/Android.bp
+++ b/libarttools/Android.bp
@@ -59,7 +59,6 @@ art_cc_defaults {
"tools_test.cc",
],
shared_libs: [
- "libarttools",
"libbase",
],
static_libs: [
@@ -80,6 +79,9 @@ art_cc_test {
"art_gtest_defaults",
"art_libarttools_tests_defaults",
],
+ shared_libs: [
+ "libarttools",
+ ],
}
// Standalone version of ART gtest `art_libarttools_tests`, not bundled with the ART APEX on
@@ -90,6 +92,9 @@ art_cc_test {
"art_standalone_gtest_defaults",
"art_libarttools_tests_defaults",
],
+ static_libs: [
+ "libarttools",
+ ],
}
cc_binary {
diff --git a/libdexfile/Android.bp b/libdexfile/Android.bp
index ad528c1dd2..b51456c6d9 100644
--- a/libdexfile/Android.bp
+++ b/libdexfile/Android.bp
@@ -385,7 +385,6 @@ art_cc_defaults {
"external/dex_file_ext_test.cc",
],
shared_libs: [
- "libartbase",
"libdexfile",
],
header_libs: [
@@ -467,7 +466,6 @@ art_cc_defaults {
"external/dex_file_supp_test.cc",
],
shared_libs: [
- "libartbase",
"libbase",
"libdexfile",
"liblog",
diff --git a/libdexfile/art_standalone_libdexfile_tests.xml b/libdexfile/art_standalone_libdexfile_tests.xml
index ca30d749e2..4f18d8ab8b 100644
--- a/libdexfile/art_standalone_libdexfile_tests.xml
+++ b/libdexfile/art_standalone_libdexfile_tests.xml
@@ -37,8 +37,6 @@
<test class="com.android.tradefed.testtype.GTest" >
<option name="native-test-device-path" value="/data/local/tmp/art_standalone_libdexfile_tests" />
<option name="module-name" value="art_standalone_libdexfile_tests" />
- <option name="ld-library-path-32" value="/apex/com.android.art/lib" />
- <option name="ld-library-path-64" value="/apex/com.android.art/lib64" />
</test>
<!-- When this test is run in a Mainline context (e.g. with `mts-tradefed`), only enable it if
diff --git a/libprofile/art_standalone_libprofile_tests.xml b/libprofile/art_standalone_libprofile_tests.xml
index fc4ebdc389..8a4cf8d627 100644
--- a/libprofile/art_standalone_libprofile_tests.xml
+++ b/libprofile/art_standalone_libprofile_tests.xml
@@ -33,8 +33,6 @@
<test class="com.android.tradefed.testtype.GTest" >
<option name="native-test-device-path" value="/data/local/tmp/art_standalone_libprofile_tests" />
<option name="module-name" value="art_standalone_libprofile_tests" />
- <option name="ld-library-path-32" value="/apex/com.android.art/lib" />
- <option name="ld-library-path-64" value="/apex/com.android.art/lib64" />
</test>
<!-- When this test is run in a Mainline context (e.g. with `mts-tradefed`), only enable it if
diff --git a/oatdump/art_standalone_oatdump_tests.xml b/oatdump/art_standalone_oatdump_tests.xml
index bb5cc0a2cd..15dba47e93 100644
--- a/oatdump/art_standalone_oatdump_tests.xml
+++ b/oatdump/art_standalone_oatdump_tests.xml
@@ -45,8 +45,6 @@
<test class="com.android.tradefed.testtype.GTest" >
<option name="native-test-device-path" value="/data/local/tmp/art_standalone_oatdump_tests" />
<option name="module-name" value="art_standalone_oatdump_tests" />
- <option name="ld-library-path-32" value="/apex/com.android.art/lib" />
- <option name="ld-library-path-64" value="/apex/com.android.art/lib64" />
</test>
<!-- When this test is run in a Mainline context (e.g. with `mts-tradefed`), only enable it if
diff --git a/odrefresh/Android.bp b/odrefresh/Android.bp
index 7b9aad7913..8b6ea8de6f 100644
--- a/odrefresh/Android.bp
+++ b/odrefresh/Android.bp
@@ -41,7 +41,6 @@ cc_defaults {
"art-odrefresh-operator-srcs",
],
shared_libs: [
- "libarttools", // Contains "libc++fs".
"libbase",
"liblog",
"libselinux",
@@ -65,6 +64,7 @@ cc_defaults {
],
srcs: ["odrefresh_main.cc"],
shared_libs: [
+ "libarttools", // Contains "libc++fs".
"libdexfile",
],
target: {
@@ -210,6 +210,9 @@ art_cc_test {
"art_odrefresh_tests_defaults",
],
host_supported: false,
+ shared_libs: [
+ "libarttools",
+ ],
// The test config template is needed even though it's not used by the test
// runner. Otherwise, Soong will generate a test config, which is adding
// `art-host-test` as a test tag, while this test does not support running
@@ -225,6 +228,9 @@ art_cc_test {
"art_standalone_gtest_defaults",
"art_odrefresh_tests_defaults",
],
+ static_libs: [
+ "libarttools",
+ ],
}
genrule {
diff --git a/profman/art_standalone_profman_tests.xml b/profman/art_standalone_profman_tests.xml
index 94a54a7875..875f263d8f 100644
--- a/profman/art_standalone_profman_tests.xml
+++ b/profman/art_standalone_profman_tests.xml
@@ -46,8 +46,6 @@
<test class="com.android.tradefed.testtype.GTest" >
<option name="native-test-device-path" value="/data/local/tmp/art_standalone_profman_tests" />
<option name="module-name" value="art_standalone_profman_tests" />
- <option name="ld-library-path-32" value="/apex/com.android.art/lib" />
- <option name="ld-library-path-64" value="/apex/com.android.art/lib64" />
<!-- The following tests from `art_standalone_profman_tests` are currently failing when
run as 32-bit on a 64-bit device, because they try to execute other system (64-bit)
diff --git a/runtime/art_standalone_runtime_tests.xml b/runtime/art_standalone_runtime_tests.xml
index 76c1e5434c..d0e959c345 100644
--- a/runtime/art_standalone_runtime_tests.xml
+++ b/runtime/art_standalone_runtime_tests.xml
@@ -81,8 +81,6 @@
<test class="com.android.tradefed.testtype.GTest" >
<option name="native-test-device-path" value="/data/local/tmp/art_standalone_runtime_tests" />
<option name="module-name" value="art_standalone_runtime_tests" />
- <option name="ld-library-path-32" value="/apex/com.android.art/lib" />
- <option name="ld-library-path-64" value="/apex/com.android.art/lib64" />
<!-- The following tests from `art_standalone_runtime_tests` are currently failing
(observed on `aosp_cf_x86_64_phone-userdebug`).
diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc
index 54a56f2939..90e909595e 100644
--- a/runtime/jit/jit.cc
+++ b/runtime/jit/jit.cc
@@ -163,6 +163,13 @@ bool Jit::CompileMethodInternal(ArtMethod* method,
compilation_kind = CompilationKind::kBaseline;
}
+ if (method->IsPreCompiled() && !prejit) {
+    VLOG(jit) << "JIT not compiling " << method->PrettyMethod()
+              << " because the method is marked for pre-compilation"
+              << " and this compilation request is not for pre-compilation.";
+ return false;
+ }
+
// If we're asked to compile baseline, but we cannot allocate profiling infos,
// change the compilation kind to optimized.
if ((compilation_kind == CompilationKind::kBaseline) &&
@@ -1366,6 +1373,7 @@ void Jit::EnqueueOptimizedCompilation(ArtMethod* method, Thread* self) {
if (thread_pool_ == nullptr) {
return;
}
+
   // We arrive here after baseline compiled code has reached its baseline
   // hotness threshold. If we're not only using the baseline compiler, enqueue a compilation
   // task that will compile the method with the optimizing compiler.
diff --git a/runtime/thread-inl.h b/runtime/thread-inl.h
index 2fcc4b065b..57d606cab6 100644
--- a/runtime/thread-inl.h
+++ b/runtime/thread-inl.h
@@ -279,7 +279,21 @@ inline void Thread::CheckActiveSuspendBarriers() {
}
}
+inline void Thread::CheckBarrierInactive(WrappedSuspend1Barrier* suspend1_barrier) {
+ for (WrappedSuspend1Barrier* w = tlsPtr_.active_suspend1_barriers; w != nullptr; w = w->next_) {
+ CHECK_EQ(w->magic_, WrappedSuspend1Barrier::kMagic)
+ << "first = " << tlsPtr_.active_suspend1_barriers << " current = " << w
+ << " next = " << w->next_;
+ CHECK_NE(w, suspend1_barrier);
+ }
+}
+
inline void Thread::AddSuspend1Barrier(WrappedSuspend1Barrier* suspend1_barrier) {
+ if (tlsPtr_.active_suspend1_barriers != nullptr) {
+ CHECK_EQ(tlsPtr_.active_suspend1_barriers->magic_, WrappedSuspend1Barrier::kMagic)
+ << "first = " << tlsPtr_.active_suspend1_barriers;
+ }
+ CHECK_EQ(suspend1_barrier->magic_, WrappedSuspend1Barrier::kMagic);
suspend1_barrier->next_ = tlsPtr_.active_suspend1_barriers;
tlsPtr_.active_suspend1_barriers = suspend1_barrier;
}
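
The checks added above walk the thread's intrusive list of stack-allocated suspend barriers and validate a magic value on every node (the `magic_` field itself lands in the `thread.h` hunk below). The point is to catch a node that was corrupted, or whose stack slot was reused after the owning frame unwound, while b/23668816 is being hunted. A toy version of the pattern, with `assert` standing in for the runtime's CHECKs:

    #include <cassert>

    struct Barrier {
      static constexpr int kMagic = 0xba8;
      int magic = kMagic;
      Barrier* next = nullptr;
    };

    struct ThreadState {
      Barrier* head = nullptr;

      void Push(Barrier* b) {
        assert(b->magic == Barrier::kMagic);  // Validate before linking.
        b->next = head;
        head = b;
      }

      void CheckInactive(const Barrier* b) const {
        for (Barrier* w = head; w != nullptr; w = w->next) {
          assert(w->magic == Barrier::kMagic);  // Detect corruption / stack reuse.
          assert(w != b);                       // `b` must already be unlinked.
        }
      }
    };

    int main() {
      ThreadState t;
      Barrier b1, b2;
      t.Push(&b1);
      t.Push(&b2);
      Barrier unlinked;
      t.CheckInactive(&unlinked);  // Passes: `unlinked` was never on the list.
    }
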
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 03632887d0..2ebbe1327d 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -1513,6 +1513,9 @@ bool Thread::PassActiveSuspendBarriers() {
tlsPtr_.active_suspendall_barrier = nullptr;
}
for (WrappedSuspend1Barrier* w = tlsPtr_.active_suspend1_barriers; w != nullptr; w = w->next_) {
+ CHECK_EQ(w->magic_, WrappedSuspend1Barrier::kMagic)
+ << "first = " << tlsPtr_.active_suspend1_barriers << " current = " << w
+ << " next = " << w->next_;
pass_barriers.push_back(&(w->barrier_));
}
tlsPtr_.active_suspend1_barriers = nullptr;
@@ -1685,9 +1688,9 @@ bool Thread::RequestSynchronousCheckpoint(Closure* function, ThreadState wait_st
// Although this is a thread suspension, the target thread only blocks while we run the
// checkpoint, which is presumed to terminate quickly even if other threads are blocked.
// Note: IncrementSuspendCount also expects the thread_list_lock to be held unless this == self.
+ WrappedSuspend1Barrier wrapped_barrier{};
{
bool is_suspended = false;
- WrappedSuspend1Barrier wrapped_barrier{};
{
MutexLock suspend_count_mu(self, *Locks::thread_suspend_count_lock_);
@@ -1767,6 +1770,9 @@ bool Thread::RequestSynchronousCheckpoint(Closure* function, ThreadState wait_st
DCHECK_NE(GetState(), ThreadState::kRunnable);
DCHECK_GT(GetSuspendCount(), 0);
DecrementSuspendCount(self);
+ if (kIsDebugBuild) {
+ CheckBarrierInactive(&wrapped_barrier);
+ }
resume_cond_->Broadcast(self);
}
diff --git a/runtime/thread.h b/runtime/thread.h
index a59b10ae13..8bcd8153ae 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -199,8 +199,11 @@ enum class WeakRefAccessState : int32_t {
// See Thread.tlsPtr_.active_suspend1_barriers below for explanation.
struct WrappedSuspend1Barrier {
- WrappedSuspend1Barrier() : barrier_(1), next_(nullptr) {}
- AtomicInteger barrier_; // Only updated while holding thread_suspend_count_lock_ .
+ // TODO(b/23668816): At least weaken CHECKs to DCHECKs once the bug is fixed.
+ static constexpr int kMagic = 0xba8;
+ WrappedSuspend1Barrier() : magic_(kMagic), barrier_(1), next_(nullptr) {}
+ int magic_;
+ AtomicInteger barrier_;
struct WrappedSuspend1Barrier* next_ GUARDED_BY(Locks::thread_suspend_count_lock_);
};
@@ -1429,21 +1432,18 @@ class EXPORT Thread {
}
// Remove the suspend trigger for this thread by making the suspend_trigger_ TLS value
// equal to a valid pointer.
- // TODO: does this need to atomic? I don't think so.
void RemoveSuspendTrigger() {
- tlsPtr_.suspend_trigger = reinterpret_cast<uintptr_t*>(&tlsPtr_.suspend_trigger);
+ tlsPtr_.suspend_trigger.store(reinterpret_cast<uintptr_t*>(&tlsPtr_.suspend_trigger),
+ std::memory_order_relaxed);
}
// Trigger a suspend check by making the suspend_trigger_ TLS value an invalid pointer.
// The next time a suspend check is done, it will load from the value at this address
// and trigger a SIGSEGV.
- // Only needed if Runtime::implicit_suspend_checks_ is true and fully implemented. It currently
- // is always false. Client code currently just looks at the thread flags directly to determine
- // whether we should suspend, so this call is currently unnecessary.
- void TriggerSuspend() {
- tlsPtr_.suspend_trigger = nullptr;
- }
-
+ // Only needed if Runtime::implicit_suspend_checks_ is true. On some platforms, and in the
+ // interpreter, client code currently just looks at the thread flags directly to determine
+ // whether we should suspend, so this call is not always necessary.
+ void TriggerSuspend() { tlsPtr_.suspend_trigger.store(nullptr, std::memory_order_release); }
// Push an object onto the allocation stack.
bool PushOnThreadLocalAllocationStack(mirror::Object* obj)
@@ -1772,6 +1772,10 @@ class EXPORT Thread {
ALWAYS_INLINE bool HasActiveSuspendBarrier() REQUIRES(Locks::thread_suspend_count_lock_);
+ // CHECK that the given barrier is no longer on our list.
+ ALWAYS_INLINE void CheckBarrierInactive(WrappedSuspend1Barrier* suspend1_barrier)
+ REQUIRES(Locks::thread_suspend_count_lock_);
+
// Registers the current thread as the jit sensitive thread. Should be called just once.
static void SetJitSensitiveThread() {
if (jit_sensitive_thread_ == nullptr) {
@@ -2150,8 +2154,12 @@ class EXPORT Thread {
ManagedStack managed_stack;
// In certain modes, setting this to 0 will trigger a SEGV and thus a suspend check. It is
- // normally set to the address of itself.
- uintptr_t* suspend_trigger;
+ // normally set to the address of itself. It should be cleared with release semantics to ensure
+ // that prior state changes etc. are visible to any thread that faults as a result.
+ // We assume that the kernel ensures that such changes are then visible to the faulting
+ // thread, even if it is not an acquire load that faults. (Indeed, it seems unlikely that the
+ // ordering semantics associated with the faulting load has any impact.)
+ std::atomic<uintptr_t*> suspend_trigger;
// Every thread may have an associated JNI environment
JNIEnvExt* jni_env;
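
The `suspend_trigger` rework above turns the plain pointer into a `std::atomic`, with the protocol the new comment describes: pointing the trigger at itself makes the implicit check a harmless load, while storing `nullptr` with release ordering arms it so the next check faults and the fault handler performs the suspension. A simplified sketch of that protocol, without the actual SIGSEGV handling (method names mirror the ones above, everything else is illustrative):

    #include <atomic>
    #include <cstdint>

    struct FakeThread {
      std::atomic<uintptr_t*> suspend_trigger;

      void RemoveSuspendTrigger() {
        // Point the trigger at itself: dereferencing it is then harmless.
        suspend_trigger.store(reinterpret_cast<uintptr_t*>(&suspend_trigger),
                              std::memory_order_relaxed);
      }

      void TriggerSuspend() {
        // Release store: state written before this is visible to the thread
        // that subsequently faults on the null dereference.
        suspend_trigger.store(nullptr, std::memory_order_release);
      }

      bool SuspendCheckWouldFault() const {
        return suspend_trigger.load(std::memory_order_relaxed) == nullptr;
      }
    };

    int main() {
      FakeThread t;
      t.RemoveSuspendTrigger();
      t.TriggerSuspend();
      return t.SuspendCheckWouldFault() ? 0 : 1;
    }
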
diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc
index 5e63b27b20..02e3f5475a 100644
--- a/runtime/thread_list.cc
+++ b/runtime/thread_list.cc
@@ -1089,6 +1089,8 @@ bool ThreadList::SuspendThread(Thread* self,
if (thread->IsSuspended()) {
// See the discussion in mutator_gc_coord.md and SuspendAllInternal for the race here.
thread->RemoveFirstSuspend1Barrier(&wrapped_barrier);
+ // PassActiveSuspendBarriers couldn't have seen our barrier, since it also acquires
+ // 'thread_suspend_count_lock_'. `wrapped_barrier` will not be accessed.
if (!thread->HasActiveSuspendBarrier()) {
thread->AtomicClearFlag(ThreadFlag::kActiveSuspendBarrier);
}
@@ -1127,9 +1129,6 @@ bool ThreadList::SuspendThread(Thread* self,
// Now wait for target to decrement suspend barrier.
std::optional<std::string> failure_info;
if (!is_suspended) {
- // As an experiment, redundantly trigger suspension. TODO: Remove this.
- std::atomic_thread_fence(std::memory_order_seq_cst);
- thread->TriggerSuspend();
failure_info = WaitForSuspendBarrier(&wrapped_barrier.barrier_, tid, attempt_of_4);
if (!failure_info.has_value()) {
is_suspended = true;
@@ -1188,7 +1187,7 @@ bool ThreadList::SuspendThread(Thread* self,
}
is_suspended = true;
}
- // wrapped_barrier.barrier_ has been decremented and will no longer be accessed.
+ // wrapped_barrier.barrier_ will no longer be accessed.
VLOG(threads) << func_name << " suspended: " << *thread;
if (ATraceEnabled()) {
std::string name;
@@ -1197,7 +1196,11 @@ bool ThreadList::SuspendThread(Thread* self,
StringPrintf("%s suspended %s for tid=%d", func_name, name.c_str(), thread->GetTid())
.c_str());
}
- DCHECK(thread->IsSuspended());
+ if (kIsDebugBuild) {
+ CHECK(thread->IsSuspended());
+ MutexLock suspend_count_mu(self, *Locks::thread_suspend_count_lock_);
+ thread->CheckBarrierInactive(&wrapped_barrier);
+ }
return true;
}
diff --git a/test/2243-checker-not-inline-into-throw/src/Main.java b/test/2243-checker-not-inline-into-throw/src/Main.java
index 6f1280c026..f1d60a129d 100644
--- a/test/2243-checker-not-inline-into-throw/src/Main.java
+++ b/test/2243-checker-not-inline-into-throw/src/Main.java
@@ -32,14 +32,18 @@ public class Main {
// Empty methods are easy to inline anywhere.
private static void easyToInline() {}
private static void $inline$easyToInline() {}
+ private static void twoLevelEasyToInline() { easyToInline(); }
/// CHECK-START: int Main.$noinline$testEndsWithThrow() inliner (before)
- /// CHECK: InvokeStaticOrDirect method_name:Main.easyToInline
+ /// CHECK: InvokeStaticOrDirect method_name:Main.twoLevelEasyToInline
/// CHECK-START: int Main.$noinline$testEndsWithThrow() inliner (after)
- /// CHECK: InvokeStaticOrDirect method_name:Main.easyToInline
+ /// CHECK: InvokeStaticOrDirect method_name:Main.twoLevelEasyToInline
static int $noinline$testEndsWithThrow() {
- easyToInline();
+    // Use two-level inlining to avoid a pattern match in the inliner.
+    // The pattern matching is deliberately done before we check whether inlining is
+    // "encouraged", which includes checking whether the block ends with a `throw`.
+ twoLevelEasyToInline();
throw new Error("");
}
diff --git a/test/442-checker-constant-folding/src/Main.java b/test/442-checker-constant-folding/src/Main.java
index 7c6d6091ee..13c42d228d 100644
--- a/test/442-checker-constant-folding/src/Main.java
+++ b/test/442-checker-constant-folding/src/Main.java
@@ -886,6 +886,91 @@ public class Main {
return arg & ~arg;
}
+ /// CHECK-START: int Main.OrSelfNegated(int) constant_folding (before)
+ /// CHECK-DAG: <<Arg:i\d+>> ParameterValue
+ /// CHECK-DAG: <<Not:i\d+>> Not [<<Arg>>]
+ /// CHECK-DAG: <<Or:i\d+>> Or [<<Not>>,<<Arg>>]
+ /// CHECK-DAG: Return [<<Or>>]
+
+ /// CHECK-START: int Main.OrSelfNegated(int) constant_folding (after)
+ /// CHECK-DAG: <<Const:i\d+>> IntConstant -1
+ /// CHECK-DAG: Return [<<Const>>]
+
+ /// CHECK-START: int Main.OrSelfNegated(int) constant_folding (after)
+ /// CHECK-NOT: Or
+
+ public static int OrSelfNegated(int arg) {
+ return arg | ~arg;
+ }
+
+ /// CHECK-START: int Main.XorSelfNegated(int) constant_folding (before)
+ /// CHECK-DAG: <<Arg:i\d+>> ParameterValue
+ /// CHECK-DAG: <<Not:i\d+>> Not [<<Arg>>]
+ /// CHECK-DAG: <<Xor:i\d+>> Xor [<<Not>>,<<Arg>>]
+ /// CHECK-DAG: Return [<<Xor>>]
+
+ /// CHECK-START: int Main.XorSelfNegated(int) constant_folding (after)
+ /// CHECK-DAG: <<Const:i\d+>> IntConstant -1
+ /// CHECK-DAG: Return [<<Const>>]
+
+ /// CHECK-START: int Main.XorSelfNegated(int) constant_folding (after)
+ /// CHECK-NOT: Xor
+
+ public static int XorSelfNegated(int arg) {
+ return arg ^ ~arg;
+ }
+
+ /// CHECK-START: long Main.AndSelfNegated(long) constant_folding (before)
+ /// CHECK-DAG: <<Arg:j\d+>> ParameterValue
+ /// CHECK-DAG: <<Not:j\d+>> Not [<<Arg>>]
+ /// CHECK-DAG: <<And:j\d+>> And [<<Not>>,<<Arg>>]
+ /// CHECK-DAG: Return [<<And>>]
+
+ /// CHECK-START: long Main.AndSelfNegated(long) constant_folding (after)
+ /// CHECK-DAG: <<Const0:j\d+>> LongConstant 0
+ /// CHECK-DAG: Return [<<Const0>>]
+
+ /// CHECK-START: long Main.AndSelfNegated(long) constant_folding (after)
+ /// CHECK-NOT: And
+
+ public static long AndSelfNegated(long arg) {
+ return arg & ~arg;
+ }
+
+ /// CHECK-START: long Main.OrSelfNegated(long) constant_folding (before)
+ /// CHECK-DAG: <<Arg:j\d+>> ParameterValue
+ /// CHECK-DAG: <<Not:j\d+>> Not [<<Arg>>]
+ /// CHECK-DAG: <<Or:j\d+>> Or [<<Not>>,<<Arg>>]
+ /// CHECK-DAG: Return [<<Or>>]
+
+ /// CHECK-START: long Main.OrSelfNegated(long) constant_folding (after)
+ /// CHECK-DAG: <<Const:j\d+>> LongConstant -1
+ /// CHECK-DAG: Return [<<Const>>]
+
+ /// CHECK-START: long Main.OrSelfNegated(long) constant_folding (after)
+ /// CHECK-NOT: Or
+
+ public static long OrSelfNegated(long arg) {
+ return arg | ~arg;
+ }
+
+ /// CHECK-START: long Main.XorSelfNegated(long) constant_folding (before)
+ /// CHECK-DAG: <<Arg:j\d+>> ParameterValue
+ /// CHECK-DAG: <<Not:j\d+>> Not [<<Arg>>]
+ /// CHECK-DAG: <<Xor:j\d+>> Xor [<<Not>>,<<Arg>>]
+ /// CHECK-DAG: Return [<<Xor>>]
+
+ /// CHECK-START: long Main.XorSelfNegated(long) constant_folding (after)
+ /// CHECK-DAG: <<Const:j\d+>> LongConstant -1
+ /// CHECK-DAG: Return [<<Const>>]
+
+ /// CHECK-START: long Main.XorSelfNegated(long) constant_folding (after)
+ /// CHECK-NOT: Xor
+
+ public static long XorSelfNegated(long arg) {
+ return arg ^ ~arg;
+ }
+
/**
* Exercise constant folding on logical or.
diff --git a/test/476-checker-ctor-fence-redun-elim/src/Main.java b/test/476-checker-ctor-fence-redun-elim/src/Main.java
index 05f2f7c5cf..b065b1315e 100644
--- a/test/476-checker-ctor-fence-redun-elim/src/Main.java
+++ b/test/476-checker-ctor-fence-redun-elim/src/Main.java
@@ -32,6 +32,13 @@ class Base {
int w2;
int w3;
+ Base() {
+ // Prevent inliner from matching the code pattern when calling this constructor
+ // to test the normal inlining that builds and inserts the callee graph.
+ // (Pattern matching can merge or eliminate constructor barriers.)
+ $inline$nop();
+ }
+
@Override
public String toString() {
return getClass().getName() + "(" + baseString() + ")";
@@ -40,6 +47,8 @@ class Base {
protected String baseString() {
return String.format("w0: %d, w1: %d, w2: %d, w3: %d", w0, w1, w2, w3);
}
+
+ private void $inline$nop() {}
}
// This has a final field in its constructor, so there must be a field freeze
diff --git a/test/476-checker-ctor-memory-barrier/src/Main.java b/test/476-checker-ctor-memory-barrier/src/Main.java
index e887cd32a0..f4ae3a9182 100644
--- a/test/476-checker-ctor-memory-barrier/src/Main.java
+++ b/test/476-checker-ctor-memory-barrier/src/Main.java
@@ -61,7 +61,9 @@ class ClassWithFinals {
/// CHECK-NOT: {{[slm]}}fence
public ClassWithFinals() {
// Exactly one constructor barrier.
- x = 0;
+ // Note: Do not store 0 as that can be eliminated together with the constructor
+ // barrier by the code pattern substitution in the inliner.
+ x = 1;
}
/// CHECK-START: void ClassWithFinals.<init>(int) inliner (after)
diff --git a/test/569-checker-pattern-replacement/build.py b/test/569-checker-pattern-replacement/build.py
new file mode 100644
index 0000000000..abe80d8bbb
--- /dev/null
+++ b/test/569-checker-pattern-replacement/build.py
@@ -0,0 +1,20 @@
+#
+# Copyright (C) 2024 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+def build(ctx):
+ if ctx.jvm:
+    return  # The test does not build on the JVM.
+ ctx.default_build()
diff --git a/test/569-checker-pattern-replacement/src-multidex/Second.java b/test/569-checker-pattern-replacement/src-multidex/Second.java
index 89835c6f22..6528806a0c 100644
--- a/test/569-checker-pattern-replacement/src-multidex/Second.java
+++ b/test/569-checker-pattern-replacement/src-multidex/Second.java
@@ -14,9 +14,14 @@
* limitations under the License.
*/
+import dalvik.annotation.optimization.NeverInline;
+
public final class Second {
public static void staticNop(int unused) { }
+ @NeverInline
+ public static void staticNopNeverInline(int unused) { }
+
public void nop() { }
public static Object staticReturnArg2(int unused1, String arg2) {
diff --git a/test/569-checker-pattern-replacement/src/Main.java b/test/569-checker-pattern-replacement/src/Main.java
index 8951d3058b..9f4598d34b 100644
--- a/test/569-checker-pattern-replacement/src/Main.java
+++ b/test/569-checker-pattern-replacement/src/Main.java
@@ -15,6 +15,26 @@
*/
public class Main {
+ static class ExpectedError extends Error {}
+
+ public static void localStaticNopAndThrow() {
+ // Pattern matching replaces the invoke even in a block that ends with a `throw`.
+ $inline$localStaticNop();
+ throw new ExpectedError();
+ }
+
+ public static void $inline$localStaticNop() {}
+
+ /// CHECK-START: void Main.staticNopNeverInline() inliner (before)
+ /// CHECK: InvokeStaticOrDirect
+
+ /// CHECK-START: void Main.staticNopNeverInline() inliner (after)
+ /// CHECK: InvokeStaticOrDirect
+
+ public static void staticNopNeverInline() {
+ Second.staticNopNeverInline(11);
+ }
+
/// CHECK-START: void Main.staticNop() inliner (before)
/// CHECK: InvokeStaticOrDirect
@@ -1177,6 +1197,8 @@ public class Main {
// Replaced NOP pattern.
staticNop();
nop(s);
+ // Not replaced NOP pattern.
+ staticNopNeverInline();
// Replaced "return arg" pattern.
assertEquals("arbitrary string", staticReturnArg2("arbitrary string"));
assertEquals(4321L, returnArg1(s, 4321L));
@@ -1259,6 +1281,11 @@ public class Main {
assertEquals(123, constructDerivedInSecondDex(123));
assertEquals(0, constructDerivedInSecondDexWith0());
assertEquals(0, constructDerivedInSecondDex(7L));
+
+ try {
+ localStaticNopAndThrow();
+ throw new Error("Unreachable");
+ } catch (ExpectedError expected) {}
}
private static void assertEquals(int expected, int actual) {
diff --git a/test/639-checker-code-sinking/src/Main.java b/test/639-checker-code-sinking/src/Main.java
index f8c1d9d4e7..6d1cded67f 100644
--- a/test/639-checker-code-sinking/src/Main.java
+++ b/test/639-checker-code-sinking/src/Main.java
@@ -17,8 +17,14 @@
public class Main {
static class ValueHolder {
int getValue() {
+ // Prevent inliner from matching the code pattern when calling this method to test
+ // the normal inlining path that does not inline in blocks that end with a `throw`.
+ $inline$nop();
+
return 1;
}
+
+ private void $inline$nop() {}
}
public static void main(String[] args) throws Exception {
diff --git a/test/art-gtests-target-standalone-cts-template.xml b/test/art-gtests-target-standalone-cts-template.xml
index a5ac30bfee..42ad284978 100644
--- a/test/art-gtests-target-standalone-cts-template.xml
+++ b/test/art-gtests-target-standalone-cts-template.xml
@@ -32,8 +32,6 @@
<test class="com.android.tradefed.testtype.GTest" >
<option name="native-test-device-path" value="/data/local/tmp/{MODULE}" />
<option name="module-name" value="{MODULE}" />
- <option name="ld-library-path-32" value="/apex/com.android.art/lib" />
- <option name="ld-library-path-64" value="/apex/com.android.art/lib64" />
</test>
<!-- When this test is run in a Mainline context (e.g. with `mts-tradefed`), only enable it if
diff --git a/test/art-gtests-target-standalone-template.xml b/test/art-gtests-target-standalone-template.xml
index d8981128d4..1c21620342 100644
--- a/test/art-gtests-target-standalone-template.xml
+++ b/test/art-gtests-target-standalone-template.xml
@@ -27,8 +27,6 @@
<test class="com.android.tradefed.testtype.GTest" >
<option name="native-test-device-path" value="/data/local/tmp/{MODULE}" />
<option name="module-name" value="{MODULE}" />
- <option name="ld-library-path-32" value="/apex/com.android.art/lib" />
- <option name="ld-library-path-64" value="/apex/com.android.art/lib64" />
</test>
<!-- When this test is run in a Mainline context (e.g. with `mts-tradefed`), only enable it if
diff --git a/test/art-gtests-target-standalone-with-boot-image-template.xml b/test/art-gtests-target-standalone-with-boot-image-template.xml
index 9d2a4c4ef3..7f44ba6a9b 100644
--- a/test/art-gtests-target-standalone-with-boot-image-template.xml
+++ b/test/art-gtests-target-standalone-with-boot-image-template.xml
@@ -40,8 +40,6 @@
<test class="com.android.tradefed.testtype.GTest" >
<option name="native-test-device-path" value="/data/local/tmp/{MODULE}" />
<option name="module-name" value="{MODULE}" />
- <option name="ld-library-path-32" value="/apex/com.android.art/lib" />
- <option name="ld-library-path-64" value="/apex/com.android.art/lib64" />
</test>
<!-- When this test is run in a Mainline context (e.g. with `mts-tradefed`), only enable it if
diff --git a/test/dexpreopt/art_standalone_dexpreopt_tests.xml b/test/dexpreopt/art_standalone_dexpreopt_tests.xml
index 67faa76586..cf459e65af 100644
--- a/test/dexpreopt/art_standalone_dexpreopt_tests.xml
+++ b/test/dexpreopt/art_standalone_dexpreopt_tests.xml
@@ -35,8 +35,6 @@
<test class="com.android.tradefed.testtype.GTest" >
<option name="native-test-device-path" value="/data/local/tmp/{MODULE}" />
<option name="module-name" value="{MODULE}" />
- <option name="ld-library-path-32" value="/apex/com.android.art/lib" />
- <option name="ld-library-path-64" value="/apex/com.android.art/lib64" />
</test>
<!-- Only run tests if the device under test is SDK version 31 (Android 12) or above. -->
diff --git a/test/knownfailures.json b/test/knownfailures.json
index b47a1a1b35..31158e31ca 100644
--- a/test/knownfailures.json
+++ b/test/knownfailures.json
@@ -58,12 +58,6 @@
"loaded systems."]
},
{
- "tests": "569-checker-pattern-replacement",
- "variant": "target",
- "description": ["569-checker-pattern-replacement tests behaviour",
- "present only on host."]
- },
- {
"tests": ["116-nodex2oat",
"118-noimage-dex2oat"],
"variant": "prebuild",
@@ -1576,5 +1570,17 @@
"description": ["Test relies on profiling done by baseline compiled code. Meanwhile, it",
"can't use --baseline because it has a test case checking the behavior when",
"a method is optimized compiled."]
+ },
+ {
+ "tests": ["004-ThreadStress",
+ "050-sync-test",
+ "083-compiler-regressions",
+ "137-cfi",
+ "913-heaps",
+ "2043-reference-pauses",
+ "2239-varhandle-perf"],
+ "bug": "b/328023607",
+ "description": ["Fails on QEMU"],
+ "env_vars": {"ART_TEST_ON_VM": "true"}
}
]
diff --git a/test/testrunner/env.py b/test/testrunner/env.py
index 8313756995..de24b4cde6 100644
--- a/test/testrunner/env.py
+++ b/test/testrunner/env.py
@@ -152,11 +152,10 @@ ART_TEST_ON_VM = _env.get('ART_TEST_ON_VM')
ART_TEST_SSH_PORT = _env.get('ART_TEST_SSH_PORT', 10001)
ART_TEST_SSH_USER = _env.get('ART_TEST_SSH_USER', 'ubuntu')
ART_TEST_SSH_HOST = _env.get('ART_TEST_SSH_HOST', 'localhost')
-ART_SSH_CMD = _env.get('ART_SSH_CMD', f"ssh -q -i ~/.ssh/ubuntu -p {ART_TEST_SSH_PORT} "
- f"-o StrictHostKeyChecking=no "
+ART_SSH_CONFIG = os.path.join(os.path.dirname(__file__), 'ssh_config')
+ART_SSH_CMD = _env.get('ART_SSH_CMD', f"ssh -q -F {ART_SSH_CONFIG} -p {ART_TEST_SSH_PORT} "
f"{ART_TEST_SSH_USER}@{ART_TEST_SSH_HOST}")
-ART_SCP_CMD = _env.get('ART_SCP_CMD', f"scp -i ~/.ssh/ubuntu -P {ART_TEST_SSH_PORT} "
- f"-o StrictHostKeyChecking=no -p -r")
+ART_SCP_CMD = _env.get('ART_SCP_CMD', f"scp -q -F {ART_SSH_CONFIG} -P {ART_TEST_SSH_PORT} -p -r")
ART_CHROOT_CMD = _env.get('ART_CHROOT_CMD', "unshare --user --map-root-user chroot art-test-chroot")
if ART_TEST_ON_VM:
ART_TEST_CHROOT = _env.get('ART_TEST_CHROOT', f"/home/{ART_TEST_SSH_USER}/art-test-chroot")
diff --git a/test/testrunner/ssh_config b/test/testrunner/ssh_config
new file mode 100644
index 0000000000..6d847e9e1d
--- /dev/null
+++ b/test/testrunner/ssh_config
@@ -0,0 +1,6 @@
+Host *
+ IdentityFile ~/.ssh/ubuntu
+ StrictHostKeyChecking no
+ ControlMaster auto
+ ControlPersist 10m
+ ControlPath /run/user/%i/ssh-%C
diff --git a/test/testrunner/testrunner.py b/test/testrunner/testrunner.py
index 14fd0ca469..ab1098e1b0 100755
--- a/test/testrunner/testrunner.py
+++ b/test/testrunner/testrunner.py
@@ -316,7 +316,8 @@ def setup_test_env():
device_name = get_device_name()
if n_thread == 0:
# Use only part of the cores since fully loading the device tends to lead to timeouts.
- n_thread = max(1, int(get_target_cpu_count() * 0.75))
+ fraction = 1.0 if env.ART_TEST_ON_VM else 0.75
+ n_thread = max(1, int(get_target_cpu_count() * fraction))
if device_name == 'fugu':
n_thread = 1
else:
diff --git a/tools/luci/config/generated/cr-buildbucket.cfg b/tools/luci/config/generated/cr-buildbucket.cfg
index c6cdc023f8..c7d12b7cec 100644
--- a/tools/luci/config/generated/cr-buildbucket.cfg
+++ b/tools/luci/config/generated/cr-buildbucket.cfg
@@ -618,6 +618,7 @@ buckets {
properties_j: "device:\"qemu-armv8\""
properties_j: "generational_cc:true"
properties_j: "on_virtual_machine:true"
+ properties_j: "product:\"armv8\""
}
execution_timeout_secs: 108000
expiration_secs: 61200
@@ -648,6 +649,7 @@ buckets {
properties_j: "device:\"qemu-riscv64\""
properties_j: "generational_cc:true"
properties_j: "on_virtual_machine:true"
+ properties_j: "product:\"riscv64\""
}
execution_timeout_secs: 108000
expiration_secs: 61200
@@ -679,6 +681,7 @@ buckets {
properties_j: "device:\"qemu-riscv64\""
properties_j: "generational_cc:true"
properties_j: "on_virtual_machine:true"
+ properties_j: "product:\"riscv64\""
}
execution_timeout_secs: 108000
expiration_secs: 61200
diff --git a/tools/luci/config/main.star b/tools/luci/config/main.star
index 0320ce68cf..dd7acab460 100755
--- a/tools/luci/config/main.star
+++ b/tools/luci/config/main.star
@@ -499,6 +499,7 @@ def host_builders():
"debug": False,
"device": "qemu-armv8",
"on_virtual_machine": True,
+ "product": "armv8",
}
)
ci_builder(
@@ -512,6 +513,7 @@ def host_builders():
"debug": False,
"device": "qemu-riscv64",
"on_virtual_machine": True,
+ "product": "riscv64",
}
)
ci_builder(
@@ -525,6 +527,7 @@ def host_builders():
"debug": False,
"device": "qemu-riscv64",
"on_virtual_machine": True,
+ "product": "riscv64",
}
)
diff --git a/tools/run-libcore-tests.py b/tools/run-libcore-tests.py
index 72db66c634..244f912c72 100755
--- a/tools/run-libcore-tests.py
+++ b/tools/run-libcore-tests.py
@@ -264,6 +264,8 @@ DISABLED_GCSTRESS_DEBUG_TESTS = {
"test.java.lang.StrictMath.HypotTests#testAgainstTranslit_shard4",
"test.java.math.BigDecimal",
"test.java.math.BigInteger#testConstructor",
+ "test.java.util.TestFormatter",
+ "test.java.util.Collection",
}
DISABLED_FUGU_TESTS = {