File riscv-v8.patch of Package chromium
From 4d24761fae4b1de87a1ba7a507e98be157a64bf4 Mon Sep 17 00:00:00 2001
From: Alexey Pavlyutkin <alexey.pavlyutkin@syntacore.com>
Date: Thu, 13 Feb 2025 11:40:23 +0000
Subject: [PATCH] [riscv][leaptiering] Enable leaptiering support
Fix the RISC-V build, which was broken by the unconditional enabling of
leaptiering (see 6207491).
Change-Id: I72db70b09b98410ae83d40b3251495b8bcbda207
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/6249680
Reviewed-by: Yahan Lu (LuYahan) <yahan@iscas.ac.cn>
Reviewed-by: Ji Qiu <qiuji@iscas.ac.cn>
Commit-Queue: Ji Qiu <qiuji@iscas.ac.cn>
Cr-Commit-Position: refs/heads/main@{#98693}
---
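[Packager note] With leaptiering enabled, JSFunction calls on riscv are
dispatched through the per-isolate JS dispatch table, indexed by the handle
held in kJavaScriptCallDispatchHandleRegister (a4 in this patch), instead of
through the function's Code object. Schematically, the
Load*FromJSDispatchTable helpers below emit the following lookup (an
illustrative C++ sketch derived only from the constants visible in the diff;
table_base and Load<T> are hypothetical stand-ins, not V8 API):

  // Entry address: shift the handle down to an index, scale it by the
  // entry size, and add the dispatch table base.
  uintptr_t entry =
      table_base + ((uintptr_t{handle} >> kJSDispatchHandleShift)
                    << kJSDispatchTableEntrySizeLog2);
  // The entrypoint is a full machine word (hence LoadWord); the parameter
  // count is the low 16 bits at kCodeObjectOffset, loaded zero-extended
  // (hence Lhu rather than the sign-extending Lh this patch replaces).
  uintptr_t entrypoint =
      Load<uintptr_t>(entry + JSDispatchEntry::kEntrypointOffset);
  uint16_t parameter_count =
      Load<uint16_t>(entry + JSDispatchEntry::kCodeObjectOffset);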
src/builtins/riscv/builtins-riscv.cc | 27 +-
src/codegen/riscv/macro-assembler-riscv.cc | 321 +++++++++++-------
src/codegen/riscv/macro-assembler-riscv.h | 48 ++-
src/codegen/riscv/register-riscv.h | 3 +-
.../backend/riscv/code-generator-riscv.cc | 41 ++-
src/maglev/riscv/maglev-assembler-riscv.cc | 3 +
6 files changed, 286 insertions(+), 157 deletions(-)
Index: chromium-134.0.6998.35/v8/src/builtins/riscv/builtins-riscv.cc
===================================================================
--- chromium-134.0.6998.35.orig/v8/src/builtins/riscv/builtins-riscv.cc
+++ chromium-134.0.6998.35/v8/src/builtins/riscv/builtins-riscv.cc
@@ -996,6 +996,7 @@ void Builtins::Generate_BaselineOutOfLin
}
void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
+ ASM_CODE_COMMENT(masm);
UseScratchRegisterScope temps(masm);
temps.Include({kScratchReg, kScratchReg2, s1});
auto descriptor =
@@ -1016,11 +1017,14 @@ void Builtins::Generate_BaselineOutOfLin
__ AssertFeedbackVector(feedback_vector, type);
}
+#ifndef V8_ENABLE_LEAPTIERING
// Check for a tiering state.
Label flags_need_processing;
Register flags = temps.Acquire();
__ LoadFeedbackVectorFlagsAndJumpIfNeedsProcessing(
flags, feedback_vector, CodeKind::BASELINE, &flags_need_processing);
+#endif
+
{
UseScratchRegisterScope temps(masm);
ResetFeedbackVectorOsrUrgency(masm, feedback_vector, temps.Acquire());
@@ -1096,6 +1100,7 @@ void Builtins::Generate_BaselineOutOfLin
// TODO(v8:11429): Document this frame setup better.
__ Ret();
+#ifndef V8_ENABLE_LEAPTIERING
__ bind(&flags_need_processing);
{
ASM_CODE_COMMENT_STRING(masm, "Optimized marker check");
@@ -1104,6 +1109,7 @@ void Builtins::Generate_BaselineOutOfLin
__ OptimizeCodeOrTailCallOptimizedCodeSlot(flags, feedback_vector);
__ Trap();
}
+#endif
__ bind(&call_stack_guard);
{
@@ -1157,15 +1163,16 @@ void Builtins::Generate_InterpreterEntry
__ LoadFeedbackVector(feedback_vector, closure, a4, &push_stack_frame);
#ifndef V8_JITLESS
+#ifndef V8_ENABLE_LEAPTIERING
// If feedback vector is valid, check for optimized code and update invocation
// count.
-
// Check the tiering state.
Label flags_need_processing;
Register flags = a4;
__ LoadFeedbackVectorFlagsAndJumpIfNeedsProcessing(
flags, feedback_vector, CodeKind::INTERPRETED_FUNCTION,
&flags_need_processing);
+#endif // !V8_ENABLE_LEAPTIERING
ResetFeedbackVectorOsrUrgency(masm, feedback_vector, a4);
// Increment invocation count for the function.
@@ -1323,10 +1330,13 @@ void Builtins::Generate_InterpreterEntry
__ Branch(&after_stack_check_interrupt);
#ifndef V8_JITLESS
+#ifndef V8_ENABLE_LEAPTIERING
__ bind(&flags_need_processing);
__ OptimizeCodeOrTailCallOptimizedCodeSlot(flags, feedback_vector);
+#endif // !V8_ENABLE_LEAPTIERING
__ bind(&is_baseline);
{
+#ifndef V8_ENABLE_LEAPTIERING
// Load the feedback vector from the closure.
__ LoadTaggedField(
feedback_vector,
@@ -1347,7 +1357,6 @@ void Builtins::Generate_InterpreterEntry
__ LoadFeedbackVectorFlagsAndJumpIfNeedsProcessing(
flags, feedback_vector, CodeKind::BASELINE, &flags_need_processing);
-#ifndef V8_ENABLE_LEAPTIERING
// TODO(olivf, 42204201): This fastcase is difficult to support with the
// sandbox as it requires getting write access to the dispatch table. See
// `JSFunction::UpdateCode`. We might want to remove it for all
@@ -1357,9 +1366,9 @@ void Builtins::Generate_InterpreterEntry
static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
__ ReplaceClosureCodeWithOptimizedCode(a2, closure);
__ JumpCodeObject(a2, kJSEntrypointTag);
-#endif // V8_ENABLE_LEAPTIERING
-
__ bind(&install_baseline_code);
+#endif // !V8_ENABLE_LEAPTIERING
+
__ GenerateTailCallToReturnedCode(Runtime::kInstallBaselineCode);
}
#endif // !V8_JITLESS
@@ -2630,9 +2639,13 @@ void Builtins::Generate_CallFunction(Mac
// -- cp : the function context.
// -----------------------------------
+#ifdef V8_ENABLE_LEAPTIERING
+ __ InvokeFunctionCode(a1, no_reg, a0, InvokeType::kJump);
+#else
__ Lhu(a2,
FieldMemOperand(a2, SharedFunctionInfo::kFormalParameterCountOffset));
__ InvokeFunctionCode(a1, no_reg, a2, a0, InvokeType::kJump);
+#endif
}
namespace {
@@ -5084,9 +5097,13 @@ void Builtins::Generate_RestartFrameTram
// Pop return address and frame.
__ LeaveFrame(StackFrame::INTERPRETED);
+#ifdef V8_ENABLE_LEAPTIERING
+ __ InvokeFunction(a1, a0, InvokeType::kJump,
+ ArgumentAdaptionMode::kDontAdapt);
+#else
__ li(a2, Operand(kDontAdaptArgumentsSentinel));
-
__ InvokeFunction(a1, a2, a0, InvokeType::kJump);
+#endif
}
#undef __
Index: chromium-134.0.6998.35/v8/src/codegen/riscv/macro-assembler-riscv.cc
===================================================================
--- chromium-134.0.6998.35.orig/v8/src/codegen/riscv/macro-assembler-riscv.cc
+++ chromium-134.0.6998.35/v8/src/codegen/riscv/macro-assembler-riscv.cc
@@ -209,24 +209,7 @@ void MacroAssembler::GenerateTailCallToR
Jump(a2);
}
-Condition MacroAssembler::LoadFeedbackVectorFlagsAndCheckIfNeedsProcessing(
- Register flags, Register feedback_vector, Register result,
- CodeKind current_code_kind) {
- ASM_CODE_COMMENT(this);
- DCHECK(!AreAliased(flags, feedback_vector));
- DCHECK(CodeKindCanTierUp(current_code_kind));
-
- Lhu(flags, FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
- uint32_t kFlagsMask = FeedbackVector::kFlagsTieringStateIsAnyRequested |
- FeedbackVector::kFlagsMaybeHasTurbofanCode |
- FeedbackVector::kFlagsLogNextExecution;
- if (current_code_kind != CodeKind::MAGLEV) {
- kFlagsMask |= FeedbackVector::kFlagsMaybeHasMaglevCode;
- }
- And(result, flags, Operand(kFlagsMask));
- return ne;
-}
-
+#ifndef V8_ENABLE_LEAPTIERING
// Read off the flags in the feedback vector and check if there
// is optimized code or a tiering state that needs to be processed.
void MacroAssembler::LoadFeedbackVectorFlagsAndJumpIfNeedsProcessing(
@@ -248,22 +231,6 @@ void MacroAssembler::OptimizeCodeOrTailC
Register flags, Register feedback_vector) {
ASM_CODE_COMMENT(this);
DCHECK(!AreAliased(flags, feedback_vector));
-#ifdef V8_ENABLE_LEAPTIERING
- // In the leaptiering case, we don't load optimized code from the feedback
- // vector so only need to call CompileOptimized or FunctionLogNextExecution
- // here. See also LoadFeedbackVectorFlagsAndJumpIfNeedsProcessing above.
- Label needs_logging;
- {
- UseScratchRegisterScope temps(this);
- Register scratch = temps.Acquire();
- And(scratch, flags,
- Operand(FeedbackVector::kFlagsTieringStateIsAnyRequested));
- Branch(&needs_logging, eq, scratch, Operand(zero_reg));
- }
- GenerateTailCallToReturnedCode(Runtime::kCompileOptimized);
- bind(&needs_logging);
- GenerateTailCallToReturnedCode(Runtime::kFunctionLogNextExecution);
-#else
UseScratchRegisterScope temps(this);
temps.Include(t0, t1);
Label maybe_has_optimized_code, maybe_needs_logging;
@@ -296,8 +263,8 @@ void MacroAssembler::OptimizeCodeOrTailC
FeedbackVector::kMaybeOptimizedCodeOffset));
TailCallOptimizedCodeSlot(this, optimized_code_entry, temps.Acquire(),
temps.Acquire());
-#endif // V8_ENABLE_LEAPTIERING
}
+#endif // !V8_ENABLE_LEAPTIERING
void MacroAssembler::LoadIsolateField(const Register& rd, IsolateFieldId id) {
li(rd, ExternalReference::Create(id));
@@ -5630,7 +5597,7 @@ void MacroAssembler::StackOverflowCheck(
void MacroAssembler::InvokePrologue(Register expected_parameter_count,
Register actual_parameter_count,
- Label* done, InvokeType type) {
+ InvokeType type) {
Label regular_invoke;
// a0: actual arguments count
@@ -5649,8 +5616,7 @@ void MacroAssembler::InvokePrologue(Regi
Label stack_overflow;
{
UseScratchRegisterScope temps(this);
- StackOverflowCheck(expected_parameter_count, temps.Acquire(),
- temps.Acquire(), &stack_overflow);
+ StackOverflowCheck(expected_parameter_count, t0, t1, &stack_overflow);
}
// Underapplication. Move the arguments already in the stack, including the
// receiver and the return address.
@@ -5694,90 +5660,150 @@ void MacroAssembler::InvokePrologue(Regi
bind(®ular_invoke);
}
-void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
- Register expected_parameter_count,
- Register actual_parameter_count) {
- Label skip_hook;
- {
- UseScratchRegisterScope temps(this);
- Register scratch = temps.Acquire();
- li(scratch,
- ExternalReference::debug_hook_on_function_call_address(isolate()));
- Lb(scratch, MemOperand(scratch));
- Branch(&skip_hook, eq, scratch, Operand(zero_reg));
- }
- {
- // Load receiver to pass it later to DebugOnFunctionCall hook.
- UseScratchRegisterScope temps(this);
- Register receiver = temps.Acquire();
- LoadReceiver(receiver);
+void MacroAssembler::CallDebugOnFunctionCall(
+ Register fun, Register new_target,
+ Register expected_parameter_count_or_dispatch_handle,
+ Register actual_parameter_count) {
+ ASM_CODE_COMMENT(this);
+ DCHECK(!AreAliased(t0, fun, new_target,
+ expected_parameter_count_or_dispatch_handle,
+ actual_parameter_count));
+
+ // Load receiver to pass it later to DebugOnFunctionCall hook.
+ LoadReceiver(t0);
+ FrameScope frame(
+ this, has_frame() ? StackFrame::NO_FRAME_TYPE : StackFrame::INTERNAL);
+
+ SmiTag(expected_parameter_count_or_dispatch_handle);
+ SmiTag(actual_parameter_count);
+ Push(expected_parameter_count_or_dispatch_handle, actual_parameter_count);
+
+ if (new_target.is_valid()) {
+ Push(new_target);
+ }
+ Push(fun, fun, t0);
+ CallRuntime(Runtime::kDebugOnFunctionCall);
+ Pop(fun);
+ if (new_target.is_valid()) {
+ Pop(new_target);
+ }
+
+ Pop(expected_parameter_count_or_dispatch_handle, actual_parameter_count);
+ SmiUntag(actual_parameter_count);
+ SmiUntag(expected_parameter_count_or_dispatch_handle);
+}
- FrameScope frame(
- this, has_frame() ? StackFrame::NO_FRAME_TYPE : StackFrame::INTERNAL);
- SmiTag(expected_parameter_count);
- Push(expected_parameter_count);
+#ifdef V8_ENABLE_LEAPTIERING
+void MacroAssembler::InvokeFunction(
+ Register function, Register actual_parameter_count, InvokeType type,
+ ArgumentAdaptionMode argument_adaption_mode) {
+ ASM_CODE_COMMENT(this);
+ // You can't call a function without a valid frame.
+ DCHECK(type == InvokeType::kJump || has_frame());
- SmiTag(actual_parameter_count);
- Push(actual_parameter_count);
+ // Contract with called JS functions requires that function is passed in a1.
+ // (See FullCodeGenerator::Generate().)
+ DCHECK_EQ(function, a1);
- if (new_target.is_valid()) {
- Push(new_target);
- }
- Push(fun);
- Push(fun);
- Push(receiver);
- CallRuntime(Runtime::kDebugOnFunctionCall);
- Pop(fun);
- if (new_target.is_valid()) {
- Pop(new_target);
- }
+ // Set up the context.
+ LoadTaggedField(cp, FieldMemOperand(function, JSFunction::kContextOffset));
- Pop(actual_parameter_count);
- SmiUntag(actual_parameter_count);
+ InvokeFunctionCode(function, no_reg, actual_parameter_count, type,
+ argument_adaption_mode);
+}
- Pop(expected_parameter_count);
- SmiUntag(expected_parameter_count);
- }
- bind(&skip_hook);
+void MacroAssembler::InvokeFunctionWithNewTarget(
+ Register function, Register new_target, Register actual_parameter_count,
+ InvokeType type) {
+ ASM_CODE_COMMENT(this);
+ // You can't call a function without a valid frame.
+ DCHECK(type == InvokeType::kJump || has_frame());
+
+ // Contract with called JS functions requires that function is passed in a1.
+ // (See FullCodeGenerator::Generate().)
+ DCHECK_EQ(function, a1);
+
+ LoadTaggedField(cp, FieldMemOperand(function, JSFunction::kContextOffset));
+
+ InvokeFunctionCode(function, new_target, actual_parameter_count, type);
}
-void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
- Register expected_parameter_count,
- Register actual_parameter_count,
- InvokeType type) {
+void MacroAssembler::InvokeFunctionCode(
+ Register function, Register new_target, Register actual_parameter_count,
+ InvokeType type, ArgumentAdaptionMode argument_adaption_mode) {
+ ASM_CODE_COMMENT(this);
// You can't call a function without a valid frame.
DCHECK_IMPLIES(type == InvokeType::kCall, has_frame());
DCHECK_EQ(function, a1);
DCHECK_IMPLIES(new_target.is_valid(), new_target == a3);
+ Register dispatch_handle = kJavaScriptCallDispatchHandleRegister;
+ Lw(dispatch_handle,
+ FieldMemOperand(function, JSFunction::kDispatchHandleOffset));
+
// On function call, call into the debugger if necessary.
- CheckDebugHook(function, new_target, expected_parameter_count,
- actual_parameter_count);
+ Label debug_hook, continue_after_hook;
+ {
+ li(t0, ExternalReference::debug_hook_on_function_call_address(isolate()));
+ Lb(t0, MemOperand(t0, 0));
+ BranchShort(&debug_hook, ne, t0, Operand(zero_reg));
+ }
+ bind(&continue_after_hook);
// Clear the new.target register if not given.
if (!new_target.is_valid()) {
LoadRoot(a3, RootIndex::kUndefinedValue);
}
- Label done;
- InvokePrologue(expected_parameter_count, actual_parameter_count, &done, type);
+ Register scratch = s1;
+ if (argument_adaption_mode == ArgumentAdaptionMode::kAdapt) {
+ Register expected_parameter_count = a2;
+ LoadParameterCountFromJSDispatchTable(expected_parameter_count,
+ dispatch_handle, scratch);
+ InvokePrologue(expected_parameter_count, actual_parameter_count, type);
+ }
+
// We call indirectly through the code field in the function to
// allow recompilation to take effect without changing any of the
// call sites.
- constexpr int unused_argument_count = 0;
+ LoadEntrypointFromJSDispatchTable(kJavaScriptCallCodeStartRegister,
+ dispatch_handle, scratch);
switch (type) {
case InvokeType::kCall:
- CallJSFunction(function, unused_argument_count);
+ Call(kJavaScriptCallCodeStartRegister);
break;
case InvokeType::kJump:
- JumpJSFunction(function);
+ Jump(kJavaScriptCallCodeStartRegister);
break;
}
+ Label done;
+ Branch(&done);
+
+ // Deferred debug hook.
+ bind(&debug_hook);
+ CallDebugOnFunctionCall(function, new_target, dispatch_handle,
+ actual_parameter_count);
+ Branch(&continue_after_hook);
- // Continue here if InvokePrologue does handle the invocation due to
- // mismatched parameter counts.
bind(&done);
}
+#else // !V8_ENABLE_LEAPTIERING
+void MacroAssembler::InvokeFunction(Register function,
+ Register expected_parameter_count,
+ Register actual_parameter_count,
+ InvokeType type) {
+ // You can't call a function without a valid frame.
+ DCHECK_IMPLIES(type == InvokeType::kCall, has_frame());
+
+ // Contract with called JS functions requires that function is passed in a1.
+ DCHECK_EQ(function, a1);
+
+ // Get the function and setup the context.
+ LoadTaggedField(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+
+ InvokeFunctionCode(a1, no_reg, expected_parameter_count,
+ actual_parameter_count, type);
+}
void MacroAssembler::InvokeFunctionWithNewTarget(
Register function, Register new_target, Register actual_parameter_count,
@@ -5804,23 +5830,44 @@ void MacroAssembler::InvokeFunctionWithN
actual_parameter_count, type);
}
-void MacroAssembler::InvokeFunction(Register function,
- Register expected_parameter_count,
- Register actual_parameter_count,
- InvokeType type) {
+void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
+ Register expected_parameter_count,
+ Register actual_parameter_count,
+ InvokeType type) {
// You can't call a function without a valid frame.
DCHECK_IMPLIES(type == InvokeType::kCall, has_frame());
-
- // Contract with called JS functions requires that function is passed in a1.
DCHECK_EQ(function, a1);
+ DCHECK_IMPLIES(new_target.is_valid(), new_target == a3);
- // Get the function and setup the context.
- LoadTaggedField(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+ // On function call, call into the debugger if necessary.
+ CheckDebugHook(function, new_target, expected_parameter_count,
+ actual_parameter_count);
- InvokeFunctionCode(a1, no_reg, expected_parameter_count,
- actual_parameter_count, type);
-}
+ // Clear the new.target register if not given.
+ if (!new_target.is_valid()) {
+ LoadRoot(a3, RootIndex::kUndefinedValue);
+ }
+ Label done;
+ InvokePrologue(expected_parameter_count, actual_parameter_count, &done, type);
+ // We call indirectly through the code field in the function to
+ // allow recompilation to take effect without changing any of the
+ // call sites.
+ constexpr int unused_argument_count = 0;
+ switch (type) {
+ case InvokeType::kCall:
+ CallJSFunction(function, unused_argument_count);
+ break;
+ case InvokeType::kJump:
+ JumpJSFunction(function);
+ break;
+ }
+
+ // Continue here if InvokePrologue does handle the invocation due to
+ // mismatched parameter counts.
+ bind(&done);
+}
+#endif // V8_ENABLE_LEAPTIERING
// ---------------------------------------------------------------------------
// Support functions.
@@ -7240,14 +7287,30 @@ void MacroAssembler::ComputeCodeStartAdd
// 2. test kMarkedForDeoptimizationBit in those flags; and
// 3. if it is not zero then it jumps to the builtin.
void MacroAssembler::BailoutIfDeoptimized() {
- int offset = InstructionStream::kCodeOffset - InstructionStream::kHeaderSize;
- LoadProtectedPointerField(
- kScratchReg, MemOperand(kJavaScriptCallCodeStartRegister, offset));
- Lw(kScratchReg, FieldMemOperand(kScratchReg, Code::kFlagsOffset));
- And(kScratchReg, kScratchReg,
- Operand(1 << Code::kMarkedForDeoptimizationBit));
- TailCallBuiltin(Builtin::kCompileLazyDeoptimizedCode, ne, kScratchReg,
+ ASM_CODE_COMMENT(this);
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ if (v8_flags.debug_code || !V8_ENABLE_LEAPTIERING_BOOL) {
+ int offset =
+ InstructionStream::kCodeOffset - InstructionStream::kHeaderSize;
+ LoadProtectedPointerField(
+ scratch, MemOperand(kJavaScriptCallCodeStartRegister, offset));
+ Lw(scratch, FieldMemOperand(scratch, Code::kFlagsOffset));
+ }
+
+#ifdef V8_ENABLE_LEAPTIERING
+ if (v8_flags.debug_code) {
+ Label not_deoptimized;
+ And(scratch, scratch, Operand(1 << Code::kMarkedForDeoptimizationBit));
+ Branch(¬_deoptimized, eq, scratch, Operand(zero_reg));
+ Abort(AbortReason::kInvalidDeoptimizedCode);
+ bind(¬_deoptimized);
+ }
+#else
+ And(scratch, scratch, Operand(1 << Code::kMarkedForDeoptimizationBit));
+ TailCallBuiltin(Builtin::kCompileLazyDeoptimizedCode, ne, scratch,
Operand(zero_reg));
+#endif
}
void MacroAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
@@ -7303,22 +7366,20 @@ void MacroAssembler::JumpCodeObject(Regi
}
void MacroAssembler::CallJSFunction(Register function_object,
- uint16_t argument_count) {
- DCHECK_WITH_MSG(!V8_ENABLE_LEAPTIERING_BOOL,
- "argument_count is only used with Leaptiering");
+ [[maybe_unused]] uint16_t argument_count) {
ASM_CODE_COMMENT(this);
Register code = kJavaScriptCallCodeStartRegister;
#ifdef V8_ENABLE_LEAPTIERING
- UseScratchRegisterScope temps(this);
- Register dispatch_handle = t0;
+ Register dispatch_handle = kJavaScriptCallDispatchHandleRegister;
Register parameter_count = t1;
+ UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
Lw(dispatch_handle,
FieldMemOperand(function_object, JSFunction::kDispatchHandleOffset));
LoadEntrypointAndParameterCountFromJSDispatchTable(code, parameter_count,
dispatch_handle, scratch);
Label match;
- Branch(&match, le, parameter_count, Immediate(argument_count));
+ Branch(&match, le, parameter_count, Operand(argument_count));
// If the parameter count doesn't match, we force a safe crash by setting the
// code entrypoint to zero, causing a nullptr dereference during the call.
mv(code, zero_reg);
@@ -7366,13 +7427,14 @@ void MacroAssembler::JumpJSFunction(Regi
ASM_CODE_COMMENT(this);
Register code = kJavaScriptCallCodeStartRegister;
#ifdef V8_ENABLE_LEAPTIERING
- LoadCodeEntrypointFromJSDispatchTable(
- code,
- FieldMemOperand(function_object, JSFunction::kDispatchHandleOffset));
+ Register dispatch_handle = kJavaScriptCallDispatchHandleRegister;
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ Lw(dispatch_handle,
+ FieldMemOperand(function_object, JSFunction::kDispatchHandleOffset));
+ LoadEntrypointFromJSDispatchTable(code, dispatch_handle, scratch);
DCHECK_EQ(jump_mode, JumpMode::kJump);
- DCHECK_NE(code, t6);
- mv(t6, code);
- Jump(t6);
+ Jump(code);
#elif V8_ENABLE_SANDBOX
// When the sandbox is enabled, we can directly fetch the entrypoint pointer
// from the code pointer table instead of going through the Code object. In
@@ -7465,11 +7527,13 @@ void MacroAssembler::LoadEntrypointFromJ
Register scratch) {
DCHECK(!AreAliased(destination, scratch));
ASM_CODE_COMMENT(this);
+ Register index = destination;
li(scratch, ExternalReference::js_dispatch_table_address());
srli(index, dispatch_handle, kJSDispatchHandleShift);
slli(index, index, kJSDispatchTableEntrySizeLog2);
AddWord(scratch, scratch, index);
- Ld(destination, MemOperand(scratch, JSDispatchEntry::kEntrypointOffset));
+ LoadWord(destination,
+ MemOperand(scratch, JSDispatchEntry::kEntrypointOffset));
}
void MacroAssembler::LoadEntrypointFromJSDispatchTable(
@@ -7486,7 +7550,7 @@ void MacroAssembler::LoadEntrypointFromJ
static_assert(!JSDispatchTable::kSupportsCompaction);
int offset = JSDispatchTable::OffsetOfEntry(dispatch_handle) +
JSDispatchEntry::kEntrypointOffset;
- Ld(destination, MemOperand(scratch, offset));
+ LoadWord(destination, MemOperand(scratch, offset));
}
void MacroAssembler::LoadParameterCountFromJSDispatchTable(
@@ -7494,12 +7558,14 @@ void MacroAssembler::LoadParameterCountF
DCHECK(!AreAliased(destination, scratch));
ASM_CODE_COMMENT(this);
Register index = destination;
- li(scratch, ExternalReference::js_dispatch_table_address());
srli(index, dispatch_handle, kJSDispatchHandleShift);
slli(index, index, kJSDispatchTableEntrySizeLog2);
+ li(scratch, ExternalReference::js_dispatch_table_address());
AddWord(scratch, scratch, index);
+#ifdef V8_TARGET_ARCH_64_BIT
static_assert(JSDispatchEntry::kParameterCountMask == 0xffff);
- Lh(destination, MemOperand(scratch, JSDispatchEntry::kCodeObjectOffset));
+#endif
+ Lhu(destination, MemOperand(scratch, JSDispatchEntry::kCodeObjectOffset));
}
void MacroAssembler::LoadEntrypointAndParameterCountFromJSDispatchTable(
@@ -7512,10 +7578,11 @@ void MacroAssembler::LoadEntrypointAndPa
srli(index, dispatch_handle, kJSDispatchHandleShift);
slli(index, index, kJSDispatchTableEntrySizeLog2);
AddWord(scratch, scratch, index);
-
- Ld(entrypoint, MemOperand(scratch, JSDispatchEntry::kEntrypointOffset));
+ LoadWord(entrypoint, MemOperand(scratch, JSDispatchEntry::kEntrypointOffset));
+#ifdef V8_TARGET_ARCH_64_BIT
static_assert(JSDispatchEntry::kParameterCountMask == 0xffff);
- Lh(parameter_count, MemOperand(scratch, JSDispatchEntry::kCodeObjectOffset));
+#endif
+ Lhu(parameter_count, MemOperand(scratch, JSDispatchEntry::kCodeObjectOffset));
}
#endif
Index: chromium-134.0.6998.35/v8/src/codegen/riscv/macro-assembler-riscv.h
===================================================================
--- chromium-134.0.6998.35.orig/v8/src/codegen/riscv/macro-assembler-riscv.h
+++ chromium-134.0.6998.35/v8/src/codegen/riscv/macro-assembler-riscv.h
@@ -1600,23 +1600,47 @@ class V8_EXPORT_PRIVATE MacroAssembler :
// -------------------------------------------------------------------------
// JavaScript invokes.
- // Invoke the JavaScript function code by either calling or jumping.
- void InvokeFunctionCode(Register function, Register new_target,
- Register expected_parameter_count,
- Register actual_parameter_count, InvokeType type);
-
- // On function call, call into the debugger if necessary.
- void CheckDebugHook(Register fun, Register new_target,
- Register expected_parameter_count,
- Register actual_parameter_count);
+ // On function call, call into the debugger.
+ void CallDebugOnFunctionCall(
+ Register fun, Register new_target,
+ Register expected_parameter_count_or_dispatch_handle,
+ Register actual_parameter_count);
+
+ // The way we invoke JSFunctions differs depending on whether leaptiering is
+ // enabled. As such, these functions exist in two variants. In the future,
+ // leaptiering will be used on all platforms. At that point, the
+ // non-leaptiering variants will disappear.
+#ifdef V8_ENABLE_LEAPTIERING
// Invoke the JavaScript function in the given register. Changes the
// current context to the context in the function before invoking.
+ void InvokeFunction(Register function, Register actual_parameter_count,
+ InvokeType type,
+ ArgumentAdaptionMode argument_adaption_mode =
+ ArgumentAdaptionMode::kAdapt);
+ // Invoke the JavaScript function in the given register.
+ // Changes the current context to the context in the function before invoking.
void InvokeFunctionWithNewTarget(Register function, Register new_target,
Register actual_parameter_count,
InvokeType type);
+ // Invoke the JavaScript function code by either calling or jumping.
+ void InvokeFunctionCode(Register function, Register new_target,
+ Register actual_parameter_count, InvokeType type,
+ ArgumentAdaptionMode argument_adaption_mode =
+ ArgumentAdaptionMode::kAdapt);
+#else
void InvokeFunction(Register function, Register expected_parameter_count,
Register actual_parameter_count, InvokeType type);
+ // Invoke the JavaScript function in the given register. Changes the
+ // current context to the context in the function before invoking.
+ void InvokeFunctionWithNewTarget(Register function, Register new_target,
+ Register actual_parameter_count,
+ InvokeType type);
+ // Invoke the JavaScript function code by either calling or jumping.
+ void InvokeFunctionCode(Register function, Register new_target,
+ Register expected_parameter_count,
+ Register actual_parameter_count, InvokeType type);
+#endif
// ---- InstructionStream generation helpers ----
@@ -1657,14 +1681,13 @@ class V8_EXPORT_PRIVATE MacroAssembler :
Register closure);
void GenerateTailCallToReturnedCode(Runtime::FunctionId function_id);
- Condition LoadFeedbackVectorFlagsAndCheckIfNeedsProcessing(
- Register flags, Register feedback_vector, Register result,
- CodeKind current_code_kind);
+#ifndef V8_ENABLE_LEAPTIERING
void LoadFeedbackVectorFlagsAndJumpIfNeedsProcessing(
Register flags, Register feedback_vector, CodeKind current_code_kind,
Label* flags_need_processing);
void OptimizeCodeOrTailCallOptimizedCodeSlot(Register flags,
Register feedback_vector);
+#endif
// -------------------------------------------------------------------------
// Support functions.
@@ -1883,8 +1906,7 @@ class V8_EXPORT_PRIVATE MacroAssembler :
// Helper functions for generating invokes.
void InvokePrologue(Register expected_parameter_count,
- Register actual_parameter_count, Label* done,
- InvokeType type);
+ Register actual_parameter_count, InvokeType type);
// Compute memory operands for safepoint stack slots.
static int SafepointRegisterStackIndex(int reg_code);
Index: chromium-134.0.6998.35/v8/src/codegen/riscv/register-riscv.h
===================================================================
--- chromium-134.0.6998.35.orig/v8/src/codegen/riscv/register-riscv.h
+++ chromium-134.0.6998.35/v8/src/codegen/riscv/register-riscv.h
@@ -301,8 +301,7 @@ constexpr Register kJavaScriptCallCodeSt
constexpr Register kJavaScriptCallTargetRegister = kJSFunctionRegister;
constexpr Register kJavaScriptCallNewTargetRegister = a3;
constexpr Register kJavaScriptCallExtraArg1Register = a2;
-// Leaptiering is not currently available on riscv64.
-constexpr Register kJavaScriptCallDispatchHandleRegister = no_reg;
+constexpr Register kJavaScriptCallDispatchHandleRegister = a4;
constexpr Register kRuntimeCallFunctionRegister = a1;
constexpr Register kRuntimeCallArgCountRegister = a0;
Index: chromium-134.0.6998.35/v8/src/compiler/backend/riscv/code-generator-riscv.cc
===================================================================
--- chromium-134.0.6998.35.orig/v8/src/compiler/backend/riscv/code-generator-riscv.cc
+++ chromium-134.0.6998.35/v8/src/compiler/backend/riscv/code-generator-riscv.cc
@@ -706,6 +706,36 @@ void CodeGenerator::AssembleCodeStartReg
kJavaScriptCallCodeStartRegister, Operand(kScratchReg));
}
+#ifdef V8_ENABLE_LEAPTIERING
+// Check that {kJavaScriptCallDispatchHandleRegister} is correct.
+void CodeGenerator::AssembleDispatchHandleRegisterCheck() {
+ DCHECK(linkage()->GetIncomingDescriptor()->IsJSFunctionCall());
+
+ // We currently don't check this for JS builtins as those are sometimes
+ // called directly (e.g. from other builtins) and not through the dispatch
+ // table. This is fine as builtin functions don't use the dispatch handle,
+ // but we could enable this check in the future if we make sure to pass the
+ // kInvalidDispatchHandle whenever we do a direct call to a JS builtin.
+ if (Builtins::IsBuiltinId(info()->builtin())) {
+ return;
+ }
+
+ // For now, we only ensure that the register references a valid dispatch
+ // entry with the correct parameter count. In the future, we may also be able
+ // to check that the entry points back to this code.
+ UseScratchRegisterScope temps(masm());
+ Register actual_parameter_count = temps.Acquire();
+ {
+ UseScratchRegisterScope temps(masm());
+ Register scratch = temps.Acquire();
+ __ LoadParameterCountFromJSDispatchTable(
+ actual_parameter_count, kJavaScriptCallDispatchHandleRegister, scratch);
+ }
+ __ Assert(eq, AbortReason::kWrongFunctionDispatchHandle,
+ actual_parameter_count, Operand(parameter_count_));
+}
+#endif // V8_ENABLE_LEAPTIERING
+
// Check if the code object is marked for deoptimization. If it is, then it
// jumps to the CompileLazyDeoptimizedCode builtin. In order to do this we need
// to:
@@ -713,16 +743,7 @@ void CodeGenerator::AssembleCodeStartReg
// the flags in the referenced {Code} object;
// 2. test kMarkedForDeoptimizationBit in those flags; and
// 3. if it is not zero then it jumps to the builtin.
-void CodeGenerator::BailoutIfDeoptimized() {
- int offset = InstructionStream::kCodeOffset - InstructionStream::kHeaderSize;
- __ LoadProtectedPointerField(
- kScratchReg, MemOperand(kJavaScriptCallCodeStartRegister, offset));
- __ Lw(kScratchReg, FieldMemOperand(kScratchReg, Code::kFlagsOffset));
- __ And(kScratchReg, kScratchReg,
- Operand(1 << Code::kMarkedForDeoptimizationBit));
- __ TailCallBuiltin(Builtin::kCompileLazyDeoptimizedCode, ne, kScratchReg,
- Operand(zero_reg));
-}
+void CodeGenerator::BailoutIfDeoptimized() { __ BailoutIfDeoptimized(); }
// Assembles an instruction after register allocation, producing machine code.
CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Index: chromium-134.0.6998.35/v8/src/maglev/riscv/maglev-assembler-riscv.cc
===================================================================
--- chromium-134.0.6998.35.orig/v8/src/maglev/riscv/maglev-assembler-riscv.cc
+++ chromium-134.0.6998.35/v8/src/maglev/riscv/maglev-assembler-riscv.cc
@@ -127,6 +127,7 @@ void MaglevAssembler::OSRPrologue(Graph*
}
void MaglevAssembler::Prologue(Graph* graph) {
+ ASM_CODE_COMMENT(this);
MaglevAssembler::TemporaryRegisterScope temps(this);
// We add two extra registers to the scope. Ideally we could add all the
// allocatable general registers, except Context, JSFunction, NewTarget and
@@ -147,6 +148,7 @@ void MaglevAssembler::Prologue(Graph* gr
}
// Tiering support.
+#ifndef V8_ENABLE_LEAPTIERING
if (v8_flags.turbofan) {
using D = MaglevOptimizeCodeOrTailCallOptimizedCodeSlotDescriptor;
Register flags = D::GetRegisterParameter(D::kFlags);
@@ -169,6 +171,7 @@ void MaglevAssembler::Prologue(Graph* gr
TailCallBuiltin(Builtin::kMaglevOptimizeCodeOrTailCallOptimizedCodeSlot,
needs_processing, flag_reg, Operand(zero_reg));
}
+#endif
EnterFrame(StackFrame::MAGLEV);
// Save arguments in frame.