// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <functional>
#include <map>
#include <optional>

#include "src/base/region-allocator.h"
#include "src/execution/isolate.h"
#include "src/heap/heap-inl.h"
#include "src/heap/memory-allocator.h"
#include "src/heap/page-pool.h"
#include "src/heap/spaces-inl.h"
#include "src/utils/ostreams.h"
#include "test/unittests/test-utils.h"
#include "testing/gtest/include/gtest/gtest.h"

namespace v8 {
namespace internal {

// This is a v8::PageAllocator implementation that decorates the provided page
// allocator with page tracking functionality.
class TrackingPageAllocator : public ::v8::PageAllocator {
 public:
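  // The tracker registers every CommitPageSize()-aligned page in a
  // RegionAllocator that spans (almost) the entire address space, so any
  // address returned by the wrapped allocator can be accounted for.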
  explicit TrackingPageAllocator(v8::PageAllocator* page_allocator)
      : page_allocator_(page_allocator),
        allocate_page_size_(page_allocator_->AllocatePageSize()),
        commit_page_size_(page_allocator_->CommitPageSize()),
        region_allocator_(kNullAddress, size_t{0} - commit_page_size_,
                          commit_page_size_) {
    CHECK_NOT_NULL(page_allocator);
    CHECK(IsAligned(allocate_page_size_, commit_page_size_));
  }
  ~TrackingPageAllocator() override = default;

  size_t AllocatePageSize() override { return allocate_page_size_; }

  size_t CommitPageSize() override { return commit_page_size_; }

  void SetRandomMmapSeed(int64_t seed) override {
    return page_allocator_->SetRandomMmapSeed(seed);
  }

  void* GetRandomMmapAddr() override {
    return page_allocator_->GetRandomMmapAddr();
  }

  void* AllocatePages(void* address, size_t size, size_t alignment,
                      PageAllocator::Permission access) override {
    void* result =
        page_allocator_->AllocatePages(address, size, alignment, access);
    if (result) {
      // Mark pages as used.
      Address current_page = reinterpret_cast<Address>(result);
      CHECK(IsAligned(current_page, allocate_page_size_));
      CHECK(IsAligned(size, allocate_page_size_));
      CHECK(region_allocator_.AllocateRegionAt(current_page, size));
      Address end = current_page + size;
      while (current_page < end) {
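        // Pages mapped with kNoAccess are only reserved, so they are tracked
        // as uncommitted.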
        PageState state{access, access != kNoAccess};
        page_permissions_.insert({current_page, state});
        current_page += commit_page_size_;
      }
    }
    return result;
  }

  bool FreePages(void* address, size_t size) override {
    bool result = page_allocator_->FreePages(address, size);
    if (result) {
      // Mark pages as free.
      Address start = reinterpret_cast<Address>(address);
      CHECK(IsAligned(start, allocate_page_size_));
      CHECK(IsAligned(size, allocate_page_size_));
      size_t freed_size = region_allocator_.FreeRegion(start);
      CHECK(IsAligned(freed_size, commit_page_size_));
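      // The tracked region may have been shrunk by ReleasePages, so it can be
      // smaller than `size`; it must still round up to the same allocatable
      // size.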
      CHECK_EQ(RoundUp(freed_size, allocate_page_size_), size);
      auto start_iter = page_permissions_.find(start);
      CHECK_NE(start_iter, page_permissions_.end());
      auto end_iter = page_permissions_.lower_bound(start + size);
      page_permissions_.erase(start_iter, end_iter);
    }
    return result;
  }

  bool ReleasePages(void* address, size_t size, size_t new_size) override {
    bool result = page_allocator_->ReleasePages(address, size, new_size);
    if (result) {
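      // The released tail [address + new_size, address + size) is trimmed from
      // the tracked region and its per-page permission entries are dropped.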
      Address start = reinterpret_cast<Address>(address);
      CHECK(IsAligned(start, allocate_page_size_));
      CHECK(IsAligned(size, commit_page_size_));
      CHECK(IsAligned(new_size, commit_page_size_));
      CHECK_LT(new_size, size);
      CHECK_EQ(region_allocator_.TrimRegion(start, new_size), size - new_size);
      auto start_iter = page_permissions_.find(start + new_size);
      CHECK_NE(start_iter, page_permissions_.end());
      auto end_iter = page_permissions_.lower_bound(start + size);
      page_permissions_.erase(start_iter, end_iter);
    }
    return result;
  }

  bool RecommitPages(void* address, size_t size,
                     PageAllocator::Permission access) override {
    bool result = page_allocator_->RecommitPages(address, size, access);
    if (result) {
      // Check that the given range already had the given access permissions.
      CheckPagePermissions(reinterpret_cast<Address>(address), size, access,
                           {});
      UpdatePagePermissions(reinterpret_cast<Address>(address), size, access,
                            true);
    }
    return result;
  }

  bool DiscardSystemPages(void* address, size_t size) override {
    bool result = page_allocator_->DiscardSystemPages(address, size);
    if (result) {
      UpdatePagePermissions(reinterpret_cast<Address>(address), size, {},
                            false);
    }
    return result;
  }

  bool DecommitPages(void* address, size_t size) override {
    bool result = page_allocator_->DecommitPages(address, size);
    if (result) {
      // Mark pages as non-accessible.
      UpdatePagePermissions(reinterpret_cast<Address>(address), size, kNoAccess,
                            false);
    }
    return result;
  }

  bool SetPermissions(void* address, size_t size,
                      PageAllocator::Permission access) override {
    bool result = page_allocator_->SetPermissions(address, size, access);
    if (result) {
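      // Both no-access permission values are tracked as uncommitted.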
      bool committed = access != kNoAccess && access != kNoAccessWillJitLater;
      UpdatePagePermissions(reinterpret_cast<Address>(address), size, access,
                            committed);
    }
    return result;
  }

  // Returns true if all the allocated pages were freed.
  bool IsEmpty() { return page_permissions_.empty(); }

  void CheckIsFree(Address address, size_t size) {
    CHECK(IsAligned(address, allocate_page_size_));
    CHECK(IsAligned(size, allocate_page_size_));
    EXPECT_TRUE(region_allocator_.IsFree(address, size));
  }

  void CheckPagePermissions(Address address, size_t size,
                            PageAllocator::Permission access,
                            std::optional<bool> committed = {true}) {
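    // A page that is expected to be committed must also be accessible.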
    CHECK_IMPLIES(committed.has_value() && committed.value(),
                  access != PageAllocator::kNoAccess);
    ForEachPage(address, size, [=](PagePermissionsMap::value_type* value) {
      if (committed.has_value()) {
        EXPECT_EQ(committed.value(), value->second.committed);
      }
      EXPECT_EQ(access, value->second.access);
    });
  }

  void Print(const char* comment) const {
    i::StdoutStream os;
    os << "\n========================================="
       << "\nTrackingPageAllocator state: ";
    if (comment) os << comment;
    os << "\n-----------------------------------------\n";
    region_allocator_.Print(os);
    os << "-----------------------------------------"
       << "\nPage permissions:";
    if (page_permissions_.empty()) {
      os << " empty\n";
      return;
    }
    os << "\n" << std::hex << std::showbase;

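    // Coalesce adjacent pages that share the same access and commit state into
    // contiguous regions to keep the output compact.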
    Address contiguous_region_start = static_cast<Address>(-1);
    Address contiguous_region_end = contiguous_region_start;
    PageAllocator::Permission contiguous_region_access =
        PageAllocator::kNoAccess;
    bool contiguous_region_access_committed = false;
    for (auto& pair : page_permissions_) {
      if (contiguous_region_end == pair.first &&
          pair.second.access == contiguous_region_access &&
          pair.second.committed == contiguous_region_access_committed) {
        contiguous_region_end += commit_page_size_;
        continue;
      }
      if (contiguous_region_start != contiguous_region_end) {
        PrintRegion(os, contiguous_region_start, contiguous_region_end,
                    contiguous_region_access,
                    contiguous_region_access_committed);
      }
      contiguous_region_start = pair.first;
      contiguous_region_end = pair.first + commit_page_size_;
      contiguous_region_access = pair.second.access;
      contiguous_region_access_committed = pair.second.committed;
    }
    if (contiguous_region_start != contiguous_region_end) {
      PrintRegion(os, contiguous_region_start, contiguous_region_end,
                  contiguous_region_access, contiguous_region_access_committed);
    }
  }

 private:
  struct PageState {
    PageAllocator::Permission access;
    bool committed;
  };
  using PagePermissionsMap = std::map<Address, PageState>;
  using ForEachFn = std::function<void(PagePermissionsMap::value_type*)>;

  static void PrintRegion(std::ostream& os, Address start, Address end,
                          PageAllocator::Permission access, bool committed) {
    os << " page: [" << start << ", " << end << "), access: ";
    switch (access) {
      case PageAllocator::kNoAccess:
      case PageAllocator::kNoAccessWillJitLater:
        os << "--";
        break;
      case PageAllocator::kRead:
        os << "R";
        break;
      case PageAllocator::kReadWrite:
        os << "RW";
        break;
      case PageAllocator::kReadWriteExecute:
        os << "RWX";
        break;
      case PageAllocator::kReadExecute:
        os << "RX";
        break;
    }
    os << ", committed: " << static_cast<int>(committed) << "\n";
  }

  void ForEachPage(Address address, size_t size, const ForEachFn& fn) {
    CHECK(IsAligned(address, commit_page_size_));
    CHECK(IsAligned(size, commit_page_size_));
    auto start_iter = page_permissions_.find(address);
    // Start page must exist in page_permissions_.
    CHECK_NE(start_iter, page_permissions_.end());
    auto end_iter = page_permissions_.find(address + size - commit_page_size_);
    // Ensure the last page of the range exists in page_permissions_.
    CHECK_NE(end_iter, page_permissions_.end());
    // Advance it to the next element so that the last page is also processed
    // by the following for loop.
    ++end_iter;
    for (auto iter = start_iter; iter != end_iter; ++iter) {
      PagePermissionsMap::value_type& pair = *iter;
      fn(&pair);
    }
  }

  void UpdatePagePermissions(Address address, size_t size,
                             std::optional<PageAllocator::Permission> access,
                             bool committed) {
    ForEachPage(address, size, [=](PagePermissionsMap::value_type* value) {
      if (access.has_value()) {
        value->second.access = access.value();
      }
      value->second.committed = committed;
    });
  }

  v8::PageAllocator* const page_allocator_;
  const size_t allocate_page_size_;
  const size_t commit_page_size_;
  // Region allocator tracks page allocation/deallocation requests.
  base::RegionAllocator region_allocator_;
  // This map keeps track of allocated pages' permissions.
  PagePermissionsMap page_permissions_;
};

// This test is currently incompatible with the sandbox. Enable it
// once the VirtualAddressSpace interface is stable.
#if !V8_OS_FUCHSIA && !V8_ENABLE_SANDBOX

template <typename TMixin>
class PoolTestMixin : public TMixin {
 public:
  PoolTestMixin();
  ~PoolTestMixin() override;
};

class PoolTest : public                                     //
                 WithInternalIsolateMixin<                  //
                     WithIsolateScopeMixin<                 //
                         WithIsolateMixin<                  //
                             PoolTestMixin<                 //
                                 WithDefaultPlatformMixin<  //
                                     ::testing::Test>>>>> {
 public:
  PoolTest() = default;
  ~PoolTest() override = default;
  PoolTest(const PoolTest&) = delete;
  PoolTest& operator=(const PoolTest&) = delete;

  static void DoMixinSetUp() {
    CHECK_NULL(tracking_page_allocator_);
    old_page_allocator_ = GetPlatformPageAllocator();
    tracking_page_allocator_ = new TrackingPageAllocator(old_page_allocator_);
    CHECK(tracking_page_allocator_->IsEmpty());
    CHECK_EQ(old_page_allocator_,
             SetPlatformPageAllocatorForTesting(tracking_page_allocator_));
    old_sweeping_flag_ = i::v8_flags.concurrent_sweeping;
    i::v8_flags.concurrent_sweeping = false;
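    // Tear down the default isolate group so that it is re-created below with
    // reservations that go through the TrackingPageAllocator.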
    IsolateGroup::ReleaseDefault();
#ifdef V8_ENABLE_SANDBOX
    // Reinitialize the sandbox so it uses the TrackingPageAllocator.
    Sandbox::current()->TearDown();
    constexpr bool use_guard_regions = false;
    CHECK(Sandbox::current()->Initialize(
        tracking_page_allocator_, kSandboxMinimumSize, use_guard_regions));
#endif
    IsolateGroup::InitializeOncePerProcess();
  }

  static void DoMixinTearDown() {
    IsolateGroup::ReleaseDefault();
#ifdef V8_ENABLE_SANDBOX
    Sandbox::current()->TearDown();
#endif
    i::v8_flags.concurrent_sweeping = old_sweeping_flag_;
    CHECK(tracking_page_allocator_->IsEmpty());

    // Restore the original v8::PageAllocator and delete the tracking one.
    CHECK_EQ(tracking_page_allocator_,
             SetPlatformPageAllocatorForTesting(old_page_allocator_));
    delete tracking_page_allocator_;
    tracking_page_allocator_ = nullptr;

    IsolateGroup::InitializeOncePerProcess();
  }

  Heap* heap() { return isolate()->heap(); }
  MemoryAllocator* allocator() { return heap()->memory_allocator(); }
  PagePool* pool() { return allocator()->pool(); }

  TrackingPageAllocator* tracking_page_allocator() {
    return tracking_page_allocator_;
  }

 private:
  static TrackingPageAllocator* tracking_page_allocator_;
  static v8::PageAllocator* old_page_allocator_;
  static bool old_sweeping_flag_;
};

TrackingPageAllocator* PoolTest::tracking_page_allocator_ = nullptr;
v8::PageAllocator* PoolTest::old_page_allocator_ = nullptr;
bool PoolTest::old_sweeping_flag_;

template <typename TMixin>
PoolTestMixin<TMixin>::PoolTestMixin() {
  PoolTest::DoMixinSetUp();
}
template <typename TMixin>
PoolTestMixin<TMixin>::~PoolTestMixin() {
  PoolTest::DoMixinTearDown();
}

// See v8:5945.
TEST_F(PoolTest, UnmapOnTeardown) {
  PageMetadata* page =
      allocator()->AllocatePage(MemoryAllocator::AllocationMode::kRegular,
                                static_cast<PagedSpace*>(heap()->old_space()),
                                Executability::NOT_EXECUTABLE);
  Address chunk_address = page->ChunkAddress();
  EXPECT_NE(nullptr, page);
  const size_t page_size = tracking_page_allocator()->AllocatePageSize();
  tracking_page_allocator()->CheckPagePermissions(chunk_address, page_size,
                                                  PageAllocator::kReadWrite);

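  // Returning the page to the pool must not unmap it: the backing memory is
  // expected to stay committed and read-write until the pool is released.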
  allocator()->Free(MemoryAllocator::FreeMode::kPool, page);
  tracking_page_allocator()->CheckPagePermissions(chunk_address, page_size,
                                                  PageAllocator::kReadWrite);
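  // Releasing the pool is what actually returns the memory to the OS (or
  // decommits it when a bounded page allocator is in use, see below).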
  pool()->ReleaseImmediately(i_isolate());
#ifdef V8_COMPRESS_POINTERS
  // In this mode the Isolate uses a bounded page allocator that allocates
  // pages inside a pre-reserved region. Thus these pages are kept reserved
  // until the Isolate dies.
  tracking_page_allocator()->CheckPagePermissions(
      chunk_address, page_size, PageAllocator::kNoAccess, false);
#else
  tracking_page_allocator()->CheckIsFree(chunk_address, page_size);
#endif  // V8_COMPRESS_POINTERS
}
#endif  // !V8_OS_FUCHSIA && !V8_ENABLE_SANDBOX

}  // namespace internal
}  // namespace v8