diff --git a/common.gypi b/common.gypi
index 89f6e87c28af79..f3e47909677639 100644
--- a/common.gypi
+++ b/common.gypi
@@ -36,7 +36,7 @@
 
     # Reset this number to 0 on major V8 upgrades.
    # Increment by one for each non-official patch applied to deps/v8.
-    'v8_embedder_string': '-node.20',
+    'v8_embedder_string': '-node.21',
 
     ##### V8 defaults for Node.js #####
diff --git a/deps/v8/src/execution/clobber-registers.cc b/deps/v8/src/execution/clobber-registers.cc
index 8f7fba765f1ca5..a7f5bf80cfec4e 100644
--- a/deps/v8/src/execution/clobber-registers.cc
+++ b/deps/v8/src/execution/clobber-registers.cc
@@ -5,19 +5,22 @@
 
 #include "src/base/build_config.h"
 
-#if V8_HOST_ARCH_ARM
+// Check both {HOST_ARCH} and {TARGET_ARCH} to disable the functionality of this
+// file for cross-compilation. The reason is that the inline assembly code below
+// does not work for cross-compilation.
+#if V8_HOST_ARCH_ARM && V8_TARGET_ARCH_ARM
 #include "src/codegen/arm/register-arm.h"
-#elif V8_HOST_ARCH_ARM64
+#elif V8_HOST_ARCH_ARM64 && V8_TARGET_ARCH_ARM64
 #include "src/codegen/arm64/register-arm64.h"
-#elif V8_HOST_ARCH_IA32
+#elif V8_HOST_ARCH_IA32 && V8_TARGET_ARCH_IA32
 #include "src/codegen/ia32/register-ia32.h"
-#elif V8_HOST_ARCH_X64
+#elif V8_HOST_ARCH_X64 && V8_TARGET_ARCH_X64
 #include "src/codegen/x64/register-x64.h"
-#elif V8_HOST_ARCH_LOONG64
+#elif V8_HOST_ARCH_LOONG64 && V8_TARGET_ARCH_LOONG64
 #include "src/codegen/loong64/register-loong64.h"
-#elif V8_HOST_ARCH_MIPS
+#elif V8_HOST_ARCH_MIPS && V8_TARGET_ARCH_MIPS
 #include "src/codegen/mips/register-mips.h"
-#elif V8_HOST_ARCH_MIPS64
+#elif V8_HOST_ARCH_MIPS64 && V8_TARGET_ARCH_MIPS64
 #include "src/codegen/mips64/register-mips64.h"
 #endif
 
@@ -26,14 +29,15 @@ namespace internal {
 
 #if V8_CC_MSVC
 // msvc only support inline assembly on x86
-#if V8_HOST_ARCH_IA32
+#if V8_HOST_ARCH_IA32 && V8_TARGET_ARCH_IA32
 #define CLOBBER_REGISTER(R) __asm xorps R, R
 
 #endif
 
 #else  // !V8_CC_MSVC
 
-#if V8_HOST_ARCH_X64 || V8_HOST_ARCH_IA32
+#if (V8_HOST_ARCH_X64 && V8_TARGET_ARCH_X64) || \
+    (V8_HOST_ARCH_IA32 && V8_TARGET_ARCH_IA32)
 #define CLOBBER_REGISTER(R) \
   __asm__ volatile(         \
       "xorps "              \
@@ -42,20 +46,19 @@ namespace internal {
       "%%" #R ::            \
           :);
 
-#elif V8_HOST_ARCH_ARM64
+#elif V8_HOST_ARCH_ARM64 && V8_TARGET_ARCH_ARM64
 #define CLOBBER_REGISTER(R) __asm__ volatile("fmov " #R ",xzr" :::);
 
-#elif V8_HOST_ARCH_LOONG64
+#elif V8_HOST_ARCH_LOONG64 && V8_TARGET_ARCH_LOONG64
 #define CLOBBER_REGISTER(R) __asm__ volatile("movgr2fr.d $" #R ",$zero" :::);
 
-#elif V8_HOST_ARCH_MIPS
+#elif V8_HOST_ARCH_MIPS && V8_TARGET_ARCH_MIPS
 #define CLOBBER_USE_REGISTER(R) __asm__ volatile("mtc1 $zero,$" #R :::);
 
-#elif V8_HOST_ARCH_MIPS64
+#elif V8_HOST_ARCH_MIPS64 && V8_TARGET_ARCH_MIPS64
 #define CLOBBER_USE_REGISTER(R) __asm__ volatile("dmtc1 $zero,$" #R :::);
 
-#endif  // V8_HOST_ARCH_X64 || V8_HOST_ARCH_IA32 || V8_HOST_ARCH_ARM64 ||
-        // V8_HOST_ARCH_LOONG64 || V8_HOST_ARCH_MIPS || V8_HOST_ARCH_MIPS64
+#endif  // V8_HOST_ARCH_XXX && V8_TARGET_ARCH_XXX
 
 #endif  // V8_CC_MSVC
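
Note (not part of the patch): the sketch below illustrates, outside of V8, the guard pattern the clobber-registers.cc change applies: inline assembly is compiled only when the architecture running the compiler (host) also matches the architecture being compiled for (target), so a cross-compile falls back to plain C++. SKETCH_TARGET_ARCH_X64 is a hypothetical stand-in for a build-system define like V8_TARGET_ARCH_X64, and ClobberDoubleRegisters here is a simplified stand-in for the real function, which keeps a non-assembly fallback branch for exactly this case.

// Minimal standalone sketch of the host==target guard; hypothetical macros,
// not V8 source.
#include <cstdio>

// Only emit x64 inline assembly when the compiler itself runs on x86-64
// (__x86_64__, the "host" side) AND the build targets x86-64
// (SKETCH_TARGET_ARCH_X64, a stand-in for a define the build system would set).
#if defined(__x86_64__) && defined(SKETCH_TARGET_ARCH_X64)
#define CLOBBER_REGISTER(R) __asm__ volatile("xorps %%" #R ",%%" #R :::);
#endif

void ClobberDoubleRegisters() {
#ifdef CLOBBER_REGISTER
  // Native build: zero a couple of SIMD registers via inline assembly.
  CLOBBER_REGISTER(xmm0)
  CLOBBER_REGISTER(xmm1)
#else
  // Cross-compile (host != target): no usable inline assembly, so this path
  // stays portable C++ and intentionally does nothing here.
#endif
}

int main() {
  ClobberDoubleRegisters();
  std::puts("ClobberDoubleRegisters ran");
  return 0;
}

Building natively with -DSKETCH_TARGET_ARCH_X64 on an x86-64 GCC/Clang exercises the assembly branch; omitting it (as a cross-compile would) selects the portable branch, mirroring what the patch makes V8 do when host and target differ.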