From 958686c4c7f6c7aef7c8b0724bb12fc67fde0787 Mon Sep 17 00:00:00 2001
From: Bertrand Presles
Date: Tue, 2 Feb 2021 16:40:52 +0100
Subject: [PATCH] Backported PR #35986

---
 common.gypi                                   |  2 +-
 deps/v8/BUILD.gn                              |  1 +
 deps/v8/include/v8-platform.h                 |  8 ++-
 deps/v8/src/base/page-allocator.cc            | 14 ++++
 deps/v8/src/base/platform/platform-cygwin.cc  |  1 +
 deps/v8/src/base/platform/platform-fuchsia.cc |  1 +
 deps/v8/src/base/platform/platform-posix.cc   |  6 ++
 deps/v8/src/base/platform/platform-win32.cc   |  1 +
 deps/v8/src/base/platform/platform.h          |  5 +-
 deps/v8/src/utils/allocation.cc               | 10 +--
 deps/v8/src/utils/allocation.h                |  7 +-
 deps/v8/src/wasm/code-space-access.h          | 69 +++++++++++++++++++
 deps/v8/src/wasm/wasm-code-manager.cc         | 20 +++++-
 deps/v8/src/wasm/wasm-serialization.cc        |  2 +
 14 files changed, 137 insertions(+), 10 deletions(-)
 create mode 100644 deps/v8/src/wasm/code-space-access.h

diff --git a/common.gypi b/common.gypi
index 102f0907745507..0df7b8f48a34ed 100644
--- a/common.gypi
+++ b/common.gypi
@@ -34,7 +34,7 @@

     # Reset this number to 0 on major V8 upgrades.
     # Increment by one for each non-official patch applied to deps/v8.
-    'v8_embedder_string': '-node.45',
+    'v8_embedder_string': '-node.49',

     ##### V8 defaults for Node.js #####

diff --git a/deps/v8/BUILD.gn b/deps/v8/BUILD.gn
index 90ec4097d7a2b8..9c656c7cc0a40f 100644
--- a/deps/v8/BUILD.gn
+++ b/deps/v8/BUILD.gn
@@ -2907,6 +2907,7 @@ v8_source_set("v8_base_without_compiler") {
     "src/wasm/baseline/liftoff-compiler.cc",
     "src/wasm/baseline/liftoff-compiler.h",
     "src/wasm/baseline/liftoff-register.h",
+    "src/wasm/code-space-access.h",
     "src/wasm/compilation-environment.h",
     "src/wasm/decoder.h",
     "src/wasm/function-body-decoder-impl.h",
diff --git a/deps/v8/include/v8-platform.h b/deps/v8/include/v8-platform.h
index 7e43b0d9db4a9d..4ecdc587f58b28 100644
--- a/deps/v8/include/v8-platform.h
+++ b/deps/v8/include/v8-platform.h
@@ -240,7 +240,13 @@ class PageAllocator {
     kReadWrite,
     // TODO(hpayer): Remove this flag. Memory should never be rwx.
     kReadWriteExecute,
-    kReadExecute
+    kReadExecute,
+    // Set this when reserving memory that will later require kReadWriteExecute
+    // permissions. The resulting behavior is platform-specific, currently
+    // this is used to set the MAP_JIT flag on Apple Silicon.
+    // TODO(jkummerow): Remove this when Wasm has a platform-independent
+    // w^x implementation.
+    kNoAccessWillJitLater
   };

   /**
diff --git a/deps/v8/src/base/page-allocator.cc b/deps/v8/src/base/page-allocator.cc
index 76a0aff39953a4..28f6c157c5535a 100644
--- a/deps/v8/src/base/page-allocator.cc
+++ b/deps/v8/src/base/page-allocator.cc
@@ -6,6 +6,10 @@

 #include "src/base/platform/platform.h"

+#if V8_OS_MACOSX
+#include <sys/mman.h>  // For MAP_JIT.
+#endif
+
 namespace v8 {
 namespace base {

@@ -21,6 +25,8 @@ STATIC_ASSERT_ENUM(PageAllocator::kReadWriteExecute,
                    base::OS::MemoryPermission::kReadWriteExecute);
 STATIC_ASSERT_ENUM(PageAllocator::kReadExecute,
                    base::OS::MemoryPermission::kReadExecute);
+STATIC_ASSERT_ENUM(PageAllocator::kNoAccessWillJitLater,
+                   base::OS::MemoryPermission::kNoAccessWillJitLater);

 #undef STATIC_ASSERT_ENUM

@@ -38,6 +44,14 @@ void* PageAllocator::GetRandomMmapAddr() {

 void* PageAllocator::AllocatePages(void* hint, size_t size, size_t alignment,
                                    PageAllocator::Permission access) {
+#if !(V8_OS_MACOSX && V8_HOST_ARCH_ARM64 && defined(MAP_JIT))
+  // kNoAccessWillJitLater is only used on Apple Silicon. Map it to regular
+  // kNoAccess on other platforms, so code doesn't have to handle both enum
+  // values.
+  if (access == PageAllocator::kNoAccessWillJitLater) {
+    access = PageAllocator::kNoAccess;
+  }
+#endif
   return base::OS::Allocate(hint, size, alignment,
                             static_cast<OS::MemoryPermission>(access));
 }
diff --git a/deps/v8/src/base/platform/platform-cygwin.cc b/deps/v8/src/base/platform/platform-cygwin.cc
index 92a5fbe490f4c3..b9da2f1cd592db 100644
--- a/deps/v8/src/base/platform/platform-cygwin.cc
+++ b/deps/v8/src/base/platform/platform-cygwin.cc
@@ -33,6 +33,7 @@ namespace {
 DWORD GetProtectionFromMemoryPermission(OS::MemoryPermission access) {
   switch (access) {
     case OS::MemoryPermission::kNoAccess:
+    case OS::MemoryPermission::kNoAccessWillJitLater:
       return PAGE_NOACCESS;
     case OS::MemoryPermission::kRead:
       return PAGE_READONLY;
diff --git a/deps/v8/src/base/platform/platform-fuchsia.cc b/deps/v8/src/base/platform/platform-fuchsia.cc
index fa175c39177aea..35a508a140ebd7 100644
--- a/deps/v8/src/base/platform/platform-fuchsia.cc
+++ b/deps/v8/src/base/platform/platform-fuchsia.cc
@@ -18,6 +18,7 @@ namespace {
 uint32_t GetProtectionFromMemoryPermission(OS::MemoryPermission access) {
   switch (access) {
     case OS::MemoryPermission::kNoAccess:
+    case OS::MemoryPermission::kNoAccessWillJitLater:
       return 0;  // no permissions
     case OS::MemoryPermission::kRead:
       return ZX_VM_PERM_READ;
diff --git a/deps/v8/src/base/platform/platform-posix.cc b/deps/v8/src/base/platform/platform-posix.cc
index c50cdd7a98eefd..3e9b8f3463a984 100644
--- a/deps/v8/src/base/platform/platform-posix.cc
+++ b/deps/v8/src/base/platform/platform-posix.cc
@@ -111,6 +111,7 @@ const int kMmapFdOffset = 0;
 int GetProtectionFromMemoryPermission(OS::MemoryPermission access) {
   switch (access) {
     case OS::MemoryPermission::kNoAccess:
+    case OS::MemoryPermission::kNoAccessWillJitLater:
       return PROT_NONE;
     case OS::MemoryPermission::kRead:
       return PROT_READ;
@@ -134,6 +135,11 @@ int GetFlagsForMemoryPermission(OS::MemoryPermission access) {
     flags |= MAP_LAZY;
 #endif  // V8_OS_QNX
   }
+#if V8_OS_MACOSX && V8_HOST_ARCH_ARM64 && defined(MAP_JIT)
+  if (access == OS::MemoryPermission::kNoAccessWillJitLater) {
+    flags |= MAP_JIT;
+  }
+#endif
   return flags;
 }

diff --git a/deps/v8/src/base/platform/platform-win32.cc b/deps/v8/src/base/platform/platform-win32.cc
index 04ef8a30f229bd..027414e9eaee58 100644
--- a/deps/v8/src/base/platform/platform-win32.cc
+++ b/deps/v8/src/base/platform/platform-win32.cc
@@ -753,6 +753,7 @@ namespace {
 DWORD GetProtectionFromMemoryPermission(OS::MemoryPermission access) {
   switch (access) {
     case OS::MemoryPermission::kNoAccess:
+    case OS::MemoryPermission::kNoAccessWillJitLater:
       return PAGE_NOACCESS;
     case OS::MemoryPermission::kRead:
       return PAGE_READONLY;
diff --git a/deps/v8/src/base/platform/platform.h b/deps/v8/src/base/platform/platform.h
index e1f84043eb8a73..7cb22d3de4eb13 100644
--- a/deps/v8/src/base/platform/platform.h
+++ b/deps/v8/src/base/platform/platform.h
@@ -163,7 +163,10 @@ class V8_BASE_EXPORT OS {
     kReadWrite,
    // TODO(hpayer): Remove this flag. Memory should never be rwx.
     kReadWriteExecute,
-    kReadExecute
+    kReadExecute,
+    // TODO(jkummerow): Remove this when Wasm has a platform-independent
+    // w^x implementation.
+    kNoAccessWillJitLater
   };

   static bool HasLazyCommits();
diff --git a/deps/v8/src/utils/allocation.cc b/deps/v8/src/utils/allocation.cc
index f44b3c42ea9dcf..99ee20921755bc 100644
--- a/deps/v8/src/utils/allocation.cc
+++ b/deps/v8/src/utils/allocation.cc
@@ -207,15 +207,17 @@ bool OnCriticalMemoryPressure(size_t length) {
 }

 VirtualMemory::VirtualMemory(v8::PageAllocator* page_allocator, size_t size,
-                             void* hint, size_t alignment)
+                             void* hint, size_t alignment, JitPermission jit)
     : page_allocator_(page_allocator) {
   DCHECK_NOT_NULL(page_allocator);
   DCHECK(IsAligned(size, page_allocator_->CommitPageSize()));
   size_t page_size = page_allocator_->AllocatePageSize();
   alignment = RoundUp(alignment, page_size);
-  Address address = reinterpret_cast<Address>(
-      AllocatePages(page_allocator_, hint, RoundUp(size, page_size), alignment,
-                    PageAllocator::kNoAccess));
+  PageAllocator::Permission permissions =
+      jit == kMapAsJittable ? PageAllocator::kNoAccessWillJitLater
+                            : PageAllocator::kNoAccess;
+  Address address = reinterpret_cast<Address>(AllocatePages(
+      page_allocator_, hint, RoundUp(size, page_size), alignment, permissions));
   if (address != kNullAddress) {
     DCHECK(IsAligned(address, alignment));
     region_ = base::AddressRegion(address, size);
diff --git a/deps/v8/src/utils/allocation.h b/deps/v8/src/utils/allocation.h
index 4cb244172c5ba9..d93b8c597bcc60 100644
--- a/deps/v8/src/utils/allocation.h
+++ b/deps/v8/src/utils/allocation.h
@@ -150,6 +150,8 @@ V8_EXPORT_PRIVATE bool OnCriticalMemoryPressure(size_t length);
 // Represents and controls an area of reserved memory.
 class V8_EXPORT_PRIVATE VirtualMemory final {
  public:
+  enum JitPermission { kNoJit, kMapAsJittable };
+
   // Empty VirtualMemory object, controlling no reserved memory.
   VirtualMemory() = default;

@@ -157,8 +159,9 @@ class V8_EXPORT_PRIVATE VirtualMemory final {
   // aligned per |alignment| rounded up to the |page_allocator|'s allocate page
   // size. The |size| must be aligned with |page_allocator|'s commit page size.
   // This may not be at the position returned by address().
-  VirtualMemory(v8::PageAllocator* page_allocator, size_t size, void* hint,
-                size_t alignment = 1);
+  VirtualMemory(v8::PageAllocator* page_allocator,
+                size_t size, void* hint, size_t alignment = 1,
+                JitPermission jit = kNoJit);

   // Construct a virtual memory by assigning it some already mapped address
   // and size.
diff --git a/deps/v8/src/wasm/code-space-access.h b/deps/v8/src/wasm/code-space-access.h
new file mode 100644
index 00000000000000..5eeb980e17eddc
--- /dev/null
+++ b/deps/v8/src/wasm/code-space-access.h
@@ -0,0 +1,69 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_WASM_CODE_SPACE_ACCESS_H_
+#define V8_WASM_CODE_SPACE_ACCESS_H_
+
+#include "src/base/build_config.h"
+#include "src/base/macros.h"
+#include "src/common/globals.h"
+
+namespace v8 {
+namespace internal {
+
+#if defined(V8_OS_MACOSX) && defined(V8_HOST_ARCH_ARM64)
+
+// Ignoring this warning is considered better than relying on
+// __builtin_available.
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wunguarded-availability-new"
+inline void SwitchMemoryPermissionsToWritable() {
+  pthread_jit_write_protect_np(0);
+}
+inline void SwitchMemoryPermissionsToExecutable() {
+  pthread_jit_write_protect_np(1);
+}
+#pragma clang diagnostic pop
+
+namespace wasm {
+
+class CodeSpaceWriteScope {
+ public:
+  // TODO(jkummerow): Background threads could permanently stay in
+  // writable mode; only the main thread has to switch back and forth.
+  CodeSpaceWriteScope() {
+    if (code_space_write_nesting_level_ == 0) {
+      SwitchMemoryPermissionsToWritable();
+    }
+    code_space_write_nesting_level_++;
+  }
+  ~CodeSpaceWriteScope() {
+    code_space_write_nesting_level_--;
+    if (code_space_write_nesting_level_ == 0) {
+      SwitchMemoryPermissionsToExecutable();
+    }
+  }
+
+ private:
+  static thread_local int code_space_write_nesting_level_;
+};
+
+#define CODE_SPACE_WRITE_SCOPE CodeSpaceWriteScope _write_access_;
+
+}  // namespace wasm
+
+#else  // Not Mac-on-arm64.
+
+// Nothing to do, we map code memory with rwx permissions.
+inline void SwitchMemoryPermissionsToWritable() {}
+inline void SwitchMemoryPermissionsToExecutable() {}
+
+#define CODE_SPACE_WRITE_SCOPE
+
+#endif  // V8_OS_MACOSX && V8_HOST_ARCH_ARM64
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_WASM_CODE_SPACE_ACCESS_H_
diff --git a/deps/v8/src/wasm/wasm-code-manager.cc b/deps/v8/src/wasm/wasm-code-manager.cc
index 91cfc01ceae649..a29f6dee755ef5 100644
--- a/deps/v8/src/wasm/wasm-code-manager.cc
+++ b/deps/v8/src/wasm/wasm-code-manager.cc
@@ -6,6 +6,7 @@

 #include <iostream>

+#include "src/base/build_config.h"
 #include "src/base/adapters.h"
 #include "src/base/macros.h"
 #include "src/base/platform/platform.h"
@@ -21,6 +22,7 @@
 #include "src/snapshot/embedded/embedded-data.h"
 #include "src/utils/ostreams.h"
 #include "src/utils/vector.h"
+#include "src/wasm/code-space-access.h"
 #include "src/wasm/compilation-environment.h"
 #include "src/wasm/function-compiler.h"
 #include "src/wasm/jump-table-assembler.h"
@@ -45,6 +47,10 @@ namespace wasm {

 using trap_handler::ProtectedInstructionData;

+#if defined(V8_OS_MACOSX) && defined(V8_HOST_ARCH_ARM64)
+thread_local int CodeSpaceWriteScope::code_space_write_nesting_level_ = 0;
+#endif
+
 base::AddressRegion DisjointAllocationPool::Merge(base::AddressRegion region) {
   auto dest_it = regions_.begin();
   auto dest_end = regions_.end();
@@ -626,6 +632,7 @@ void WasmCodeAllocator::FreeCode(Vector<WasmCode*> codes) {
   // Zap code area and collect freed code regions.
   DisjointAllocationPool freed_regions;
   size_t code_size = 0;
+  CODE_SPACE_WRITE_SCOPE
   for (WasmCode* code : codes) {
     ZapCode(code->instruction_start(), code->instructions().size());
     FlushInstructionCache(code->instruction_start(),
@@ -731,6 +738,7 @@ CompilationEnv NativeModule::CreateCompilationEnv() const {
 }

 WasmCode* NativeModule::AddCodeForTesting(Handle<Code> code) {
+  CODE_SPACE_WRITE_SCOPE
   return AddAndPublishAnonymousCode(code, WasmCode::kFunction);
 }

@@ -742,6 +750,7 @@ void NativeModule::UseLazyStub(uint32_t func_index) {
   if (!lazy_compile_table_) {
     uint32_t num_slots = module_->num_declared_functions;
     WasmCodeRefScope code_ref_scope;
+    CODE_SPACE_WRITE_SCOPE
     DCHECK_EQ(1, code_space_data_.size());
     lazy_compile_table_ = CreateEmptyJumpTableInRegion(
         JumpTableAssembler::SizeForNumberOfLazyFunctions(num_slots),
@@ -924,6 +933,7 @@ std::unique_ptr<WasmCode> NativeModule::AddCodeWithCodeSpace(
   OwnedVector<byte> reloc_info;
   if (desc.reloc_size > 0) {
     reloc_info = OwnedVector<byte>::New(desc.reloc_size);
+    CODE_SPACE_WRITE_SCOPE
     memcpy(reloc_info.start(), desc.buffer + desc.buffer_size - desc.reloc_size,
            desc.reloc_size);
   }
@@ -1070,6 +1080,7 @@ WasmCode* NativeModule::AddDeserializedCode(
     OwnedVector<const byte> reloc_info,
     OwnedVector<const byte> source_position_table, WasmCode::Kind kind,
     ExecutionTier tier) {
+  // CodeSpaceWriteScope is provided by the caller.
   Vector<uint8_t> dst_code_bytes =
       code_allocator_.AllocateForCode(this, instructions.size());
   memcpy(dst_code_bytes.begin(), instructions.begin(), instructions.size());
@@ -1126,6 +1137,7 @@ WasmCode* NativeModule::CreateEmptyJumpTableInRegion(
   Vector<uint8_t> code_space =
       code_allocator_.AllocateForCodeInRegion(this, jump_table_size, region);
   DCHECK(!code_space.empty());
+  CODE_SPACE_WRITE_SCOPE
   ZapCode(reinterpret_cast<Address>(code_space.begin()), code_space.size());
   std::unique_ptr<WasmCode> code{new WasmCode{
       this,                                     // native_module
@@ -1171,6 +1183,7 @@ void NativeModule::AddCodeSpace(base::AddressRegion region) {
 #endif  // V8_OS_WIN64

   WasmCodeRefScope code_ref_scope;
+  CODE_SPACE_WRITE_SCOPE
   WasmCode* jump_table = nullptr;
   const uint32_t num_wasm_functions = module_->num_declared_functions;
   const bool has_functions = num_wasm_functions > 0;
@@ -1353,7 +1366,11 @@ VirtualMemory WasmCodeManager::TryAllocate(size_t size, void* hint) {
   if (!memory_tracker_->ReserveAddressSpace(size)) return {};
   if (hint == nullptr) hint = page_allocator->GetRandomMmapAddr();

-  VirtualMemory mem(page_allocator, size, hint, allocate_page_size);
+  // When we start exposing Wasm in jitless mode, then the jitless flag
+  // will have to determine whether we set kMapAsJittable or not.
+  DCHECK(!FLAG_jitless);
+  VirtualMemory mem(page_allocator, size, hint, allocate_page_size,
+                    VirtualMemory::kMapAsJittable);
   if (!mem.IsReserved()) {
     memory_tracker_->ReleaseReservation(size);
     return {};
@@ -1513,6 +1530,7 @@ std::vector<WasmCode*> NativeModule::AddCompiledCode(
   DCHECK(!results.empty());
   // First, allocate code space for all the results.
   size_t total_code_space = 0;
+  CODE_SPACE_WRITE_SCOPE
   for (auto& result : results) {
     DCHECK(result.succeeded());
     total_code_space += RoundUp<kCodeAlignment>(result.code_desc.instr_size);
diff --git a/deps/v8/src/wasm/wasm-serialization.cc b/deps/v8/src/wasm/wasm-serialization.cc
index 81460b9fe29912..24c61e757edd04 100644
--- a/deps/v8/src/wasm/wasm-serialization.cc
+++ b/deps/v8/src/wasm/wasm-serialization.cc
@@ -14,6 +14,7 @@
 #include "src/utils/ostreams.h"
 #include "src/utils/utils.h"
 #include "src/utils/version.h"
+#include "src/wasm/code-space-access.h"
 #include "src/wasm/function-compiler.h"
 #include "src/wasm/module-compiler.h"
 #include "src/wasm/module-decoder.h"
@@ -537,6 +538,7 @@ bool NativeModuleDeserializer::ReadCode(uint32_t fn_index, Reader* reader) {
           protected_instructions_size);
   reader->ReadVector(Vector<byte>::cast(protected_instructions.as_vector()));

+  CODE_SPACE_WRITE_SCOPE
   WasmCode* code = native_module_->AddDeserializedCode(
       fn_index, code_buffer, stack_slot_count, tagged_parameter_slots,
       safepoint_table_offset, handler_table_offset, constant_pool_offset,
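
For readers unfamiliar with the Apple Silicon JIT model this backport targets: rwx code pages must be reserved with the MAP_JIT mmap flag, and each thread then flips them between writable and executable with pthread_jit_write_protect_np(), which is what kNoAccessWillJitLater and CodeSpaceWriteScope plumb through V8 above. The following standalone sketch is illustrative only, not part of the patch; it assumes an arm64 macOS host and a binary that is allowed to create JIT mappings (for example one built without the hardened runtime or carrying the JIT entitlement).

// Illustrative only: the moral equivalent, outside V8, of what the patch wires
// into the PageAllocator (MAP_JIT) and CodeSpaceWriteScope (write-protect
// toggling). arm64 macOS only.
#include <libkern/OSCacheControl.h>  // sys_icache_invalidate()
#include <pthread.h>                 // pthread_jit_write_protect_np()
#include <sys/mman.h>                // mmap(), MAP_JIT

#include <cstdint>
#include <cstdio>
#include <cstring>

int main() {
  const size_t kSize = 4096;
  // Reserve rwx memory with MAP_JIT, as a kNoAccessWillJitLater reservation
  // ultimately does on this platform.
  void* mem = mmap(nullptr, kSize, PROT_READ | PROT_WRITE | PROT_EXEC,
                   MAP_PRIVATE | MAP_ANONYMOUS | MAP_JIT, -1, 0);
  if (mem == MAP_FAILED) return 1;

  // arm64 machine code for "mov x0, #42; ret".
  const uint32_t kCode[] = {0xd2800540, 0xd65f03c0};

  // Make the mapping writable for this thread, copy the code in, then switch
  // back to executable and flush the instruction cache before running it.
  pthread_jit_write_protect_np(0);  // cf. SwitchMemoryPermissionsToWritable()
  std::memcpy(mem, kCode, sizeof(kCode));
  pthread_jit_write_protect_np(1);  // cf. SwitchMemoryPermissionsToExecutable()
  sys_icache_invalidate(mem, sizeof(kCode));

  auto fn = reinterpret_cast<int (*)()>(mem);
  std::printf("%d\n", fn());  // prints 42
  munmap(mem, kSize);
  return 0;
}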
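
The CodeSpaceWriteScope added by this patch is deliberately reentrant: a thread-local nesting counter ensures the permission flip happens only when the outermost scope is entered and left, so CODE_SPACE_WRITE_SCOPE can be placed on call paths that nest. A simplified, self-contained model of that behaviour follows; ToggleWriteProtect and ScopedCodeSpaceWrite are hypothetical stand-ins for pthread_jit_write_protect_np() and the real scope class.

// Simplified model of the nesting-counter idea behind CodeSpaceWriteScope.
// ToggleWriteProtect is a stand-in for the real per-thread protection switch.
#include <cstdio>

namespace {

thread_local int nesting_level = 0;

void ToggleWriteProtect(bool writable) {
  std::printf("code space is now %s\n", writable ? "writable" : "executable");
}

class ScopedCodeSpaceWrite {
 public:
  ScopedCodeSpaceWrite() {
    if (nesting_level == 0) ToggleWriteProtect(true);
    nesting_level++;
  }
  ~ScopedCodeSpaceWrite() {
    nesting_level--;
    if (nesting_level == 0) ToggleWriteProtect(false);
  }
};

}  // namespace

int main() {
  ScopedCodeSpaceWrite outer;    // prints "writable" once
  {
    ScopedCodeSpaceWrite inner;  // nested scope: no toggle in either direction
  }
  return 0;                      // outer's destructor prints "executable"
}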