Backported PR nodejs#35986

Bertrand Presles authored and committed on Feb 2, 2021
1 parent f5c706e commit 958686c
Showing 14 changed files with 137 additions and 10 deletions.
2 changes: 1 addition & 1 deletion common.gypi
@@ -34,7 +34,7 @@

# Reset this number to 0 on major V8 upgrades.
# Increment by one for each non-official patch applied to deps/v8.
'v8_embedder_string': '-node.45',
'v8_embedder_string': '-node.49',

##### V8 defaults for Node.js #####

1 change: 1 addition & 0 deletions deps/v8/BUILD.gn
@@ -2907,6 +2907,7 @@ v8_source_set("v8_base_without_compiler") {
"src/wasm/baseline/liftoff-compiler.cc",
"src/wasm/baseline/liftoff-compiler.h",
"src/wasm/baseline/liftoff-register.h",
"src/wasm/code-space-access.h",
"src/wasm/compilation-environment.h",
"src/wasm/decoder.h",
"src/wasm/function-body-decoder-impl.h",
8 changes: 7 additions & 1 deletion deps/v8/include/v8-platform.h
@@ -240,7 +240,13 @@ class PageAllocator {
kReadWrite,
// TODO(hpayer): Remove this flag. Memory should never be rwx.
kReadWriteExecute,
kReadExecute
kReadExecute,
// Set this when reserving memory that will later require kReadWriteExecute
// permissions. The resulting behavior is platform-specific, currently
// this is used to set the MAP_JIT flag on Apple Silicon.
// TODO(jkummerow): Remove this when Wasm has a platform-independent
// w^x implementation.
kNoAccessWillJitLater
};

/**
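
Because v8-platform.h is a public header, embedders that implement their own v8::PageAllocator now see this extra enum value. A minimal sketch, assuming an embedder without MAP_JIT-style support simply folds the new value back into kNoAccess; the helper name is hypothetical, only the Permission enum comes from the header above, and V8's own default allocator does the same thing in the page-allocator.cc hunk below:

#include "v8-platform.h"

// Hypothetical helper for an embedder-provided allocator: without special JIT
// handling, a "will JIT later" reservation is just an inaccessible reservation.
v8::PageAllocator::Permission NormalizePermission(
    v8::PageAllocator::Permission permission) {
  if (permission == v8::PageAllocator::kNoAccessWillJitLater) {
    return v8::PageAllocator::kNoAccess;
  }
  return permission;
}
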
14 changes: 14 additions & 0 deletions deps/v8/src/base/page-allocator.cc
@@ -6,6 +6,10 @@

#include "src/base/platform/platform.h"

#if V8_OS_MACOSX
#include <sys/mman.h> // For MAP_JIT.
#endif

namespace v8 {
namespace base {

@@ -21,6 +25,8 @@ STATIC_ASSERT_ENUM(PageAllocator::kReadWriteExecute,
base::OS::MemoryPermission::kReadWriteExecute);
STATIC_ASSERT_ENUM(PageAllocator::kReadExecute,
base::OS::MemoryPermission::kReadExecute);
STATIC_ASSERT_ENUM(PageAllocator::kNoAccessWillJitLater,
base::OS::MemoryPermission::kNoAccessWillJitLater);

#undef STATIC_ASSERT_ENUM

@@ -38,6 +44,14 @@ void* PageAllocator::GetRandomMmapAddr() {

void* PageAllocator::AllocatePages(void* hint, size_t size, size_t alignment,
PageAllocator::Permission access) {
#if !(V8_OS_MACOSX && V8_HOST_ARCH_ARM64 && defined(MAP_JIT))
// kNoAccessWillJitLater is only used on Apple Silicon. Map it to regular
// kNoAccess on other platforms, so code doesn't have to handle both enum
// values.
if (access == PageAllocator::kNoAccessWillJitLater) {
access = PageAllocator::kNoAccess;
}
#endif
return base::OS::Allocate(hint, size, alignment,
static_cast<base::OS::MemoryPermission>(access));
}
1 change: 1 addition & 0 deletions deps/v8/src/base/platform/platform-cygwin.cc
@@ -33,6 +33,7 @@ namespace {
DWORD GetProtectionFromMemoryPermission(OS::MemoryPermission access) {
switch (access) {
case OS::MemoryPermission::kNoAccess:
case OS::MemoryPermission::kNoAccessWillJitLater:
return PAGE_NOACCESS;
case OS::MemoryPermission::kRead:
return PAGE_READONLY;
1 change: 1 addition & 0 deletions deps/v8/src/base/platform/platform-fuchsia.cc
@@ -18,6 +18,7 @@ namespace {
uint32_t GetProtectionFromMemoryPermission(OS::MemoryPermission access) {
switch (access) {
case OS::MemoryPermission::kNoAccess:
case OS::MemoryPermission::kNoAccessWillJitLater:
return 0; // no permissions
case OS::MemoryPermission::kRead:
return ZX_VM_PERM_READ;
6 changes: 6 additions & 0 deletions deps/v8/src/base/platform/platform-posix.cc
@@ -111,6 +111,7 @@ const int kMmapFdOffset = 0;
int GetProtectionFromMemoryPermission(OS::MemoryPermission access) {
switch (access) {
case OS::MemoryPermission::kNoAccess:
case OS::MemoryPermission::kNoAccessWillJitLater:
return PROT_NONE;
case OS::MemoryPermission::kRead:
return PROT_READ;
@@ -134,6 +135,11 @@ int GetFlagsForMemoryPermission(OS::MemoryPermission access) {
flags |= MAP_LAZY;
#endif // V8_OS_QNX
}
#if V8_OS_MACOSX && V8_HOST_ARCH_ARM64 && defined(MAP_JIT)
if (access == OS::MemoryPermission::kNoAccessWillJitLater) {
flags |= MAP_JIT;
}
#endif
return flags;
}

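
For orientation, here is what the MAP_JIT path amounts to in plain POSIX terms: reserve the region with PROT_NONE plus MAP_JIT, then upgrade it to read-write-execute when code is about to be emitted. This is a standalone sketch, not V8 code; it assumes macOS 11+ on Apple Silicon (plus the JIT entitlement when the hardened runtime is enabled), and in V8 the upgrade happens later through SetPermissions with kReadWriteExecute.

#include <sys/mman.h>

#include <cstddef>

// Reserve address space that may become executable later; mirrors the
// PROT_NONE + MAP_JIT combination used for kNoAccessWillJitLater above.
void* ReserveJitRegion(std::size_t size) {
  void* region = mmap(nullptr, size, PROT_NONE,
                      MAP_PRIVATE | MAP_ANONYMOUS | MAP_JIT, -1, 0);
  return region == MAP_FAILED ? nullptr : region;
}

// Upgrade the reservation once code is about to be written into it.
bool MakeRegionRwx(void* region, std::size_t size) {
  return mprotect(region, size, PROT_READ | PROT_WRITE | PROT_EXEC) == 0;
}
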
1 change: 1 addition & 0 deletions deps/v8/src/base/platform/platform-win32.cc
@@ -753,6 +753,7 @@ namespace {
DWORD GetProtectionFromMemoryPermission(OS::MemoryPermission access) {
switch (access) {
case OS::MemoryPermission::kNoAccess:
case OS::MemoryPermission::kNoAccessWillJitLater:
return PAGE_NOACCESS;
case OS::MemoryPermission::kRead:
return PAGE_READONLY;
5 changes: 4 additions & 1 deletion deps/v8/src/base/platform/platform.h
@@ -163,7 +163,10 @@ class V8_BASE_EXPORT OS {
kReadWrite,
// TODO(hpayer): Remove this flag. Memory should never be rwx.
kReadWriteExecute,
kReadExecute
kReadExecute,
// TODO(jkummerow): Remove this when Wasm has a platform-independent
// w^x implementation.
kNoAccessWillJitLater
};

static bool HasLazyCommits();
10 changes: 6 additions & 4 deletions deps/v8/src/utils/allocation.cc
@@ -207,15 +207,17 @@ bool OnCriticalMemoryPressure(size_t length) {
}

VirtualMemory::VirtualMemory(v8::PageAllocator* page_allocator, size_t size,
void* hint, size_t alignment)
void* hint, size_t alignment, JitPermission jit)
: page_allocator_(page_allocator) {
DCHECK_NOT_NULL(page_allocator);
DCHECK(IsAligned(size, page_allocator_->CommitPageSize()));
size_t page_size = page_allocator_->AllocatePageSize();
alignment = RoundUp(alignment, page_size);
Address address = reinterpret_cast<Address>(
AllocatePages(page_allocator_, hint, RoundUp(size, page_size), alignment,
PageAllocator::kNoAccess));
PageAllocator::Permission permissions =
jit == kMapAsJittable ? PageAllocator::kNoAccessWillJitLater
: PageAllocator::kNoAccess;
Address address = reinterpret_cast<Address>(AllocatePages(
page_allocator_, hint, RoundUp(size, page_size), alignment, permissions));
if (address != kNullAddress) {
DCHECK(IsAligned(address, alignment));
region_ = base::AddressRegion(address, size);
7 changes: 5 additions & 2 deletions deps/v8/src/utils/allocation.h
@@ -150,15 +150,18 @@ V8_EXPORT_PRIVATE bool OnCriticalMemoryPressure(size_t length);
// Represents and controls an area of reserved memory.
class V8_EXPORT_PRIVATE VirtualMemory final {
public:
enum JitPermission { kNoJit, kMapAsJittable };

// Empty VirtualMemory object, controlling no reserved memory.
VirtualMemory() = default;

// Reserves virtual memory containing an area of the given size that is
// aligned per |alignment| rounded up to the |page_allocator|'s allocate page
// size. The |size| must be aligned with |page_allocator|'s commit page size.
// This may not be at the position returned by address().
VirtualMemory(v8::PageAllocator* page_allocator, size_t size, void* hint,
size_t alignment = 1);
VirtualMemory(v8::PageAllocator* page_allocator,
size_t size, void* hint, size_t alignment = 1,
JitPermission jit = kNoJit);

// Construct a virtual memory by assigning it some already mapped address
// and size.
69 changes: 69 additions & 0 deletions deps/v8/src/wasm/code-space-access.h
@@ -0,0 +1,69 @@
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_WASM_CODE_SPACE_ACCESS_H_
#define V8_WASM_CODE_SPACE_ACCESS_H_

#include "src/base/build_config.h"
#include "src/base/macros.h"
#include "src/common/globals.h"

namespace v8 {
namespace internal {

#if defined(V8_OS_MACOSX) && defined(V8_HOST_ARCH_ARM64)

// Ignoring this warning is considered better than relying on
// __builtin_available.
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wunguarded-availability-new"
inline void SwitchMemoryPermissionsToWritable() {
pthread_jit_write_protect_np(0);
}
inline void SwitchMemoryPermissionsToExecutable() {
pthread_jit_write_protect_np(1);
}
#pragma clang diagnostic pop

namespace wasm {

class CodeSpaceWriteScope {
public:
// TODO(jkummerow): Background threads could permanently stay in
// writable mode; only the main thread has to switch back and forth.
CodeSpaceWriteScope() {
if (code_space_write_nesting_level_ == 0) {
SwitchMemoryPermissionsToWritable();
}
code_space_write_nesting_level_++;
}
~CodeSpaceWriteScope() {
code_space_write_nesting_level_--;
if (code_space_write_nesting_level_ == 0) {
SwitchMemoryPermissionsToExecutable();
}
}

private:
static thread_local int code_space_write_nesting_level_;
};

#define CODE_SPACE_WRITE_SCOPE CodeSpaceWriteScope _write_access_;

} // namespace wasm

#else // Not Mac-on-arm64.

// Nothing to do, we map code memory with rwx permissions.
inline void SwitchMemoryPermissionsToWritable() {}
inline void SwitchMemoryPermissionsToExecutable() {}

#define CODE_SPACE_WRITE_SCOPE

#endif // V8_OS_MACOSX && V8_HOST_ARCH_ARM64

} // namespace internal
} // namespace v8

#endif // V8_WASM_CODE_SPACE_ACCESS_H_
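
To make the new header concrete, here is a minimal standalone program, not part of the patch, exercising the same Apple Silicon primitives: a MAP_JIT mapping, pthread_jit_write_protect_np() to flip the current thread between the writable and executable views of that mapping, and an instruction-cache flush before running freshly written code. It assumes macOS 11+ on arm64 (and the JIT entitlement under the hardened runtime); for brevity it maps the pages RWX in one step, whereas V8 reserves them with kNoAccessWillJitLater and upgrades them later.

#include <libkern/OSCacheControl.h>  // sys_icache_invalidate
#include <pthread.h>                 // pthread_jit_write_protect_np
#include <sys/mman.h>                // mmap, MAP_JIT

#include <cstdint>
#include <cstring>

int main() {
  const size_t kSize = 4096;
  void* mem = mmap(nullptr, kSize, PROT_READ | PROT_WRITE | PROT_EXEC,
                   MAP_PRIVATE | MAP_ANONYMOUS | MAP_JIT, -1, 0);
  if (mem == MAP_FAILED) return 1;

  // AArch64 machine code for: mov x0, #42; ret
  const uint32_t kCode[] = {0xD2800540, 0xD65F03C0};

  pthread_jit_write_protect_np(0);            // this thread: writable
  std::memcpy(mem, kCode, sizeof(kCode));
  pthread_jit_write_protect_np(1);            // this thread: executable again
  sys_icache_invalidate(mem, sizeof(kCode));  // flush before executing new code

  auto fn = reinterpret_cast<int (*)()>(mem);
  return fn() == 42 ? 0 : 1;
}
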
20 changes: 19 additions & 1 deletion deps/v8/src/wasm/wasm-code-manager.cc
@@ -6,6 +6,7 @@

#include <iomanip>

#include "src/base/build_config.h"
#include "src/base/adapters.h"
#include "src/base/macros.h"
#include "src/base/platform/platform.h"
@@ -21,6 +22,7 @@
#include "src/snapshot/embedded/embedded-data.h"
#include "src/utils/ostreams.h"
#include "src/utils/vector.h"
#include "src/wasm/code-space-access.h"
#include "src/wasm/compilation-environment.h"
#include "src/wasm/function-compiler.h"
#include "src/wasm/jump-table-assembler.h"
@@ -45,6 +47,10 @@ namespace wasm {

using trap_handler::ProtectedInstructionData;

#if defined(V8_OS_MACOSX) && defined(V8_HOST_ARCH_ARM64)
thread_local int CodeSpaceWriteScope::code_space_write_nesting_level_ = 0;
#endif

base::AddressRegion DisjointAllocationPool::Merge(base::AddressRegion region) {
auto dest_it = regions_.begin();
auto dest_end = regions_.end();
@@ -626,6 +632,7 @@ void WasmCodeAllocator::FreeCode(Vector<WasmCode* const> codes) {
// Zap code area and collect freed code regions.
DisjointAllocationPool freed_regions;
size_t code_size = 0;
CODE_SPACE_WRITE_SCOPE
for (WasmCode* code : codes) {
ZapCode(code->instruction_start(), code->instructions().size());
FlushInstructionCache(code->instruction_start(),
@@ -731,6 +738,7 @@ CompilationEnv NativeModule::CreateCompilationEnv() const {
}

WasmCode* NativeModule::AddCodeForTesting(Handle<Code> code) {
CODE_SPACE_WRITE_SCOPE
return AddAndPublishAnonymousCode(code, WasmCode::kFunction);
}

@@ -742,6 +750,7 @@ void NativeModule::UseLazyStub(uint32_t func_index) {
if (!lazy_compile_table_) {
uint32_t num_slots = module_->num_declared_functions;
WasmCodeRefScope code_ref_scope;
CODE_SPACE_WRITE_SCOPE
DCHECK_EQ(1, code_space_data_.size());
lazy_compile_table_ = CreateEmptyJumpTableInRegion(
JumpTableAssembler::SizeForNumberOfLazyFunctions(num_slots),
@@ -924,6 +933,7 @@ std::unique_ptr<WasmCode> NativeModule::AddCodeWithCodeSpace(
OwnedVector<byte> reloc_info;
if (desc.reloc_size > 0) {
reloc_info = OwnedVector<byte>::New(desc.reloc_size);
CODE_SPACE_WRITE_SCOPE
memcpy(reloc_info.start(), desc.buffer + desc.buffer_size - desc.reloc_size,
desc.reloc_size);
}
@@ -1070,6 +1080,7 @@ WasmCode* NativeModule::AddDeserializedCode(
OwnedVector<const byte> reloc_info,
OwnedVector<const byte> source_position_table, WasmCode::Kind kind,
ExecutionTier tier) {
// CodeSpaceWriteScope is provided by the caller.
Vector<uint8_t> dst_code_bytes =
code_allocator_.AllocateForCode(this, instructions.size());
memcpy(dst_code_bytes.begin(), instructions.begin(), instructions.size());
@@ -1126,6 +1137,7 @@ WasmCode* NativeModule::CreateEmptyJumpTableInRegion(
Vector<uint8_t> code_space =
code_allocator_.AllocateForCodeInRegion(this, jump_table_size, region);
DCHECK(!code_space.empty());
CODE_SPACE_WRITE_SCOPE
ZapCode(reinterpret_cast<Address>(code_space.begin()), code_space.size());
std::unique_ptr<WasmCode> code{new WasmCode{
this, // native_module
@@ -1171,6 +1183,7 @@ void NativeModule::AddCodeSpace(base::AddressRegion region) {
#endif // V8_OS_WIN64

WasmCodeRefScope code_ref_scope;
CODE_SPACE_WRITE_SCOPE
WasmCode* jump_table = nullptr;
const uint32_t num_wasm_functions = module_->num_declared_functions;
const bool has_functions = num_wasm_functions > 0;
@@ -1353,7 +1366,11 @@ VirtualMemory WasmCodeManager::TryAllocate(size_t size, void* hint) {
if (!memory_tracker_->ReserveAddressSpace(size)) return {};
if (hint == nullptr) hint = page_allocator->GetRandomMmapAddr();

VirtualMemory mem(page_allocator, size, hint, allocate_page_size);
// When we start exposing Wasm in jitless mode, then the jitless flag
// will have to determine whether we set kMapAsJittable or not.
DCHECK(!FLAG_jitless);
VirtualMemory mem(page_allocator, size, hint, allocate_page_size,
VirtualMemory::kMapAsJittable);
if (!mem.IsReserved()) {
memory_tracker_->ReleaseReservation(size);
return {};
@@ -1513,6 +1530,7 @@ std::vector<WasmCode*> NativeModule::AddCompiledCode(
DCHECK(!results.empty());
// First, allocate code space for all the results.
size_t total_code_space = 0;
CODE_SPACE_WRITE_SCOPE
for (auto& result : results) {
DCHECK(result.succeeded());
total_code_space += RoundUp<kCodeAlignment>(result.code_desc.instr_size);
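
Several of the functions above open a CODE_SPACE_WRITE_SCOPE and then call helpers that open another one on the same thread; the thread-local nesting counter defined in code-space-access.h keeps the inner scopes from toggling permissions again. A toy sketch, plain C++ rather than V8 code and with a fake counter in place of pthread_jit_write_protect_np(), showing that only the outermost scope flips:

#include <cassert>

namespace {

int g_toggle_count = 0;                // counts simulated permission flips
thread_local int g_nesting_level = 0;  // mirrors code_space_write_nesting_level_

void FakeSwitchToWritable() { ++g_toggle_count; }
void FakeSwitchToExecutable() { ++g_toggle_count; }

class ToyWriteScope {
 public:
  ToyWriteScope() {
    if (g_nesting_level == 0) FakeSwitchToWritable();
    ++g_nesting_level;
  }
  ~ToyWriteScope() {
    --g_nesting_level;
    if (g_nesting_level == 0) FakeSwitchToExecutable();
  }
};

}  // namespace

int main() {
  {
    ToyWriteScope outer;
    {
      ToyWriteScope inner;        // nested scope: no additional flip
    }
    assert(g_toggle_count == 1);  // only the outer constructor flipped
  }
  assert(g_toggle_count == 2);    // the outer destructor flipped back
  return 0;
}
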
2 changes: 2 additions & 0 deletions deps/v8/src/wasm/wasm-serialization.cc
@@ -14,6 +14,7 @@
#include "src/utils/ostreams.h"
#include "src/utils/utils.h"
#include "src/utils/version.h"
#include "src/wasm/code-space-access.h"
#include "src/wasm/function-compiler.h"
#include "src/wasm/module-compiler.h"
#include "src/wasm/module-decoder.h"
@@ -537,6 +538,7 @@ bool NativeModuleDeserializer::ReadCode(uint32_t fn_index, Reader* reader) {
protected_instructions_size);
reader->ReadVector(Vector<byte>::cast(protected_instructions.as_vector()));

CODE_SPACE_WRITE_SCOPE
WasmCode* code = native_module_->AddDeserializedCode(
fn_index, code_buffer, stack_slot_count, tagged_parameter_slots,
safepoint_table_offset, handler_table_offset, constant_pool_offset,
