v8 14.1.146 (node 25.0.0)
V8 is Google's open source JavaScript engine
v8-platform.h
1// Copyright 2013 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#ifndef V8_V8_PLATFORM_H_
6#define V8_V8_PLATFORM_H_
7
8#include <math.h>
9#include <stddef.h>
10#include <stdint.h>
11#include <stdlib.h> // For abort.
12
13#include <memory>
14#include <optional>
15#include <string>
16
17#include "v8-source-location.h" // NOLINT(build/include_directory)
18#include "v8config.h" // NOLINT(build/include_directory)
19
20namespace v8 {
21
22class Isolate;
23
24// Valid priorities supported by the task scheduling infrastructure.
25enum class TaskPriority : uint8_t {
26 /**
27 * Best effort tasks are not critical for performance of the application. The
28 * platform implementation should preempt such tasks if higher priority tasks
29 * arrive.
30 */
31 kBestEffort,
32 /**
33 * User visible tasks are long running background tasks that will
34 * improve performance and memory usage of the application upon completion.
35 * Example: background compilation and garbage collection.
36 */
37 kUserVisible,
38 /**
39 * User blocking tasks are highest priority tasks that block the execution
40 * thread (e.g. major garbage collection). They must be finished as soon as
41 * possible.
42 */
43 kUserBlocking,
44 kMaxPriority = kUserBlocking
45};
46
47/**
48 * A Task represents a unit of work.
49 */
50class Task {
51 public:
52 virtual ~Task() = default;
53
54 virtual void Run() = 0;
55};
56
57/**
58 * An IdleTask represents a unit of work to be performed in idle time.
59 * The Run method is invoked with an argument that specifies the deadline in
60 * seconds returned by MonotonicallyIncreasingTime().
61 * The idle task is expected to complete by this deadline.
62 */
63class IdleTask {
64 public:
65 virtual ~IdleTask() = default;
66 virtual void Run(double deadline_in_seconds) = 0;
67};
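
Sketch only (not part of this header): an IdleTask implementation typically checks the deadline between small units of work. This assumes the embedder keeps a v8::Platform* around to query MonotonicallyIncreasingTime(); the chunk list is an invented placeholder for real work items.

#include <functional>
#include <utility>
#include <vector>
#include "v8-platform.h"

class ChunkedIdleTask final : public v8::IdleTask {
 public:
  ChunkedIdleTask(v8::Platform* platform,
                  std::vector<std::function<void()>> chunks)
      : platform_(platform), chunks_(std::move(chunks)) {}

  void Run(double deadline_in_seconds) override {
    // Process small chunks until the embedder-provided deadline is reached.
    while (!chunks_.empty() &&
           platform_->MonotonicallyIncreasingTime() < deadline_in_seconds) {
      chunks_.back()();
      chunks_.pop_back();
    }
    // Any leftover chunks would have to be re-posted as a new idle task.
  }

 private:
  v8::Platform* platform_;                     // not owned
  std::vector<std::function<void()>> chunks_;  // hypothetical work items
};
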
68
69/**
70 * A TaskRunner allows scheduling of tasks. The TaskRunner may still be used to
71 * post tasks after the isolate gets destructed, but these tasks may not get
72 * executed anymore. All tasks posted to a given TaskRunner will be invoked in
73 * sequence. Tasks can be posted from any thread.
74 */
75class TaskRunner {
76 public:
77 /**
78 * Schedules a task to be invoked by this TaskRunner. The TaskRunner
79 * implementation takes ownership of |task|.
80 *
81 * Embedders should override PostTaskImpl instead of this.
82 */
83 void PostTask(std::unique_ptr<Task> task,
84 SourceLocation location = SourceLocation::Current()) {
85 PostTaskImpl(std::move(task), location);
86 }
87
88 /**
89 * Schedules a task to be invoked by this TaskRunner. The TaskRunner
90 * implementation takes ownership of |task|. The |task| cannot be nested
91 * within other task executions.
92 *
93 * Tasks which shouldn't be interleaved with JS execution must be posted with
94 * |PostNonNestableTask| or |PostNonNestableDelayedTask|. This is because the
95 * embedder may process tasks in a callback which is called during JS
96 * execution.
97 *
98 * In particular, tasks which execute JS must be non-nestable, since JS
99 * execution is not allowed to nest.
100 *
101 * Requires that |TaskRunner::NonNestableTasksEnabled()| is true.
102 *
103 * Embedders should override PostNonNestableTaskImpl instead of this.
104 */
105 void PostNonNestableTask(
106 std::unique_ptr<Task> task,
107 SourceLocation location = SourceLocation::Current()) {
108 PostNonNestableTaskImpl(std::move(task), location);
109 }
110
111 /**
112 * Schedules a task to be invoked by this TaskRunner. The task is scheduled
113 * after the given number of seconds |delay_in_seconds|. The TaskRunner
114 * implementation takes ownership of |task|.
115 *
116 * Embedders should override PostDelayedTaskImpl instead of this.
117 */
118 void PostDelayedTask(std::unique_ptr<Task> task, double delay_in_seconds,
119 SourceLocation location = SourceLocation::Current()) {
120 PostDelayedTaskImpl(std::move(task), delay_in_seconds, location);
121 }
122
123 /**
124 * Schedules a task to be invoked by this TaskRunner. The task is scheduled
125 * after the given number of seconds |delay_in_seconds|. The TaskRunner
126 * implementation takes ownership of |task|. The |task| cannot be nested
127 * within other task executions.
128 *
129 * Tasks which shouldn't be interleaved with JS execution must be posted with
130 * |PostNonNestableTask| or |PostNonNestableDelayedTask|. This is because the
131 * embedder may process tasks in a callback which is called during JS
132 * execution.
133 *
134 * In particular, tasks which execute JS must be non-nestable, since JS
135 * execution is not allowed to nest.
136 *
137 * Requires that |TaskRunner::NonNestableDelayedTasksEnabled()| is true.
138 *
139 * Embedders should override PostNonNestableDelayedTaskImpl instead of this.
140 */
141 void PostNonNestableDelayedTask(
142 std::unique_ptr<Task> task, double delay_in_seconds,
143 SourceLocation location = SourceLocation::Current()) {
144 PostNonNestableDelayedTaskImpl(std::move(task), delay_in_seconds, location);
145 }
146
147 /**
148 * Schedules an idle task to be invoked by this TaskRunner. The task is
149 * scheduled when the embedder is idle. Requires that
150 * |TaskRunner::IdleTasksEnabled()| is true. Idle tasks may be reordered
151 * relative to other task types and may be starved for an arbitrarily long
152 * time if no idle time is available. The TaskRunner implementation takes
153 * ownership of |task|.
154 *
155 * Embedders should override PostIdleTaskImpl instead of this.
156 */
157 void PostIdleTask(std::unique_ptr<IdleTask> task,
158 SourceLocation location = SourceLocation::Current()) {
159 PostIdleTaskImpl(std::move(task), location);
160 }
161
162 /**
163 * Returns true if idle tasks are enabled for this TaskRunner.
164 */
165 virtual bool IdleTasksEnabled() = 0;
166
167 /**
168 * Returns true if non-nestable tasks are enabled for this TaskRunner.
169 */
170 virtual bool NonNestableTasksEnabled() const { return false; }
171
172 /**
173 * Returns true if non-nestable delayed tasks are enabled for this TaskRunner.
174 */
175 virtual bool NonNestableDelayedTasksEnabled() const { return false; }
176
177 TaskRunner() = default;
178 virtual ~TaskRunner() = default;
179
180 TaskRunner(const TaskRunner&) = delete;
181 TaskRunner& operator=(const TaskRunner&) = delete;
182
183 protected:
184 /**
185 * Implementation of above methods with an additional `location` argument.
186 */
187 virtual void PostTaskImpl(std::unique_ptr<Task> task,
188 const SourceLocation& location) {}
189 virtual void PostNonNestableTaskImpl(std::unique_ptr<Task> task,
190 const SourceLocation& location) {}
191 virtual void PostDelayedTaskImpl(std::unique_ptr<Task> task,
192 double delay_in_seconds,
193 const SourceLocation& location) {}
194 virtual void PostNonNestableDelayedTaskImpl(std::unique_ptr<Task> task,
195 double delay_in_seconds,
196 const SourceLocation& location) {}
197 virtual void PostIdleTaskImpl(std::unique_ptr<IdleTask> task,
198 const SourceLocation& location) {}
199};
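
A minimal usage sketch (assumptions: the runner is obtained elsewhere, e.g. from Platform::GetForegroundTaskRunner(); LogTask is an invented example type):

#include <cstdio>
#include <memory>
#include "v8-platform.h"

class LogTask final : public v8::Task {
 public:
  void Run() override { std::puts("task ran"); }
};

void PostExampleTasks(const std::shared_ptr<v8::TaskRunner>& runner) {
  // Ownership of the task moves to the runner.
  runner->PostTask(std::make_unique<LogTask>());
  // Runs no earlier than roughly two seconds from now.
  runner->PostDelayedTask(std::make_unique<LogTask>(),
                          /*delay_in_seconds=*/2.0);
}
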
200
201/**
202 * Delegate that's passed to Job's worker task, providing an entry point to
203 * communicate with the scheduler.
204 */
205class JobDelegate {
206 public:
207 /**
208 * Returns true if this thread *must* return from the worker task on the
209 * current thread ASAP. Workers should periodically invoke ShouldYield (or
210 * YieldIfNeeded()) as often as is reasonable.
211 * After this method returned true, ShouldYield must not be called again.
212 */
213 virtual bool ShouldYield() = 0;
214
215 /**
216 * Notifies the scheduler that max concurrency was increased, and the number
217 * of workers should be adjusted accordingly. See Platform::PostJob() for more
218 * details.
219 */
220 virtual void NotifyConcurrencyIncrease() = 0;
221
222 /**
223 * Returns a task_id unique among threads currently running this job, such
224 * that GetTaskId() < worker count. To achieve this, the same task_id may be
225 * reused by a different thread after a worker_task returns.
226 */
227 virtual uint8_t GetTaskId() = 0;
228
229 /**
230 * Returns true if the current task is called from the thread currently
231 * running JobHandle::Join().
232 */
233 virtual bool IsJoiningThread() const = 0;
234};
235
236/**
237 * Handle returned when posting a Job. Provides methods to control execution of
238 * the posted Job.
239 */
240class JobHandle {
241 public:
242 virtual ~JobHandle() = default;
243
244 /**
245 * Notifies the scheduler that max concurrency was increased, and the number
246 * of workers should be adjusted accordingly. See Platform::PostJob() for more
247 * details.
248 */
249 virtual void NotifyConcurrencyIncrease() = 0;
250
251 /**
252 * Contributes to the job on this thread. Doesn't return until all tasks have
253 * completed and max concurrency becomes 0. When Join() is called and max
254 * concurrency reaches 0, it should not increase again. This also promotes
255 * this Job's priority to be at least as high as the calling thread's
256 * priority.
257 */
258 virtual void Join() = 0;
259
260 /**
261 * Forces all existing workers to yield ASAP. Waits until they have all
262 * returned from the Job's callback before returning.
263 */
264 virtual void Cancel() = 0;
265
266 /*
267 * Forces all existing workers to yield ASAP but doesn't wait for them.
268 * Warning, this is dangerous if the Job's callback is bound to or has access
269 * to state which may be deleted after this call.
270 */
271 virtual void CancelAndDetach() = 0;
272
273 /**
274 * Returns true if there's any work pending or any worker running.
275 */
276 virtual bool IsActive() = 0;
277
278 /**
279 * Returns true if associated with a Job and other methods may be called.
280 * Returns false after Join() or Cancel() was called. This may return true
281 * even if no workers are running and IsCompleted() returns true.
282 */
283 virtual bool IsValid() = 0;
284
285 /**
286 * Returns true if job priority can be changed.
287 */
288 virtual bool UpdatePriorityEnabled() const { return false; }
289
290 /**
291 * Update this Job's priority.
292 */
293 virtual void UpdatePriority(TaskPriority new_priority) {}
294};
295
296/**
297 * A JobTask represents work to run in parallel from Platform::PostJob().
298 */
299class JobTask {
300 public:
301 virtual ~JobTask() = default;
302
303 virtual void Run(JobDelegate* delegate) = 0;
304
305 /**
306 * Controls the maximum number of threads calling Run() concurrently, given
307 * the number of threads currently assigned to this job and executing Run().
308 * Run() is only invoked if the number of threads previously running Run() was
309 * less than the value returned. In general, this should return the latest
310 * number of incomplete work items (smallest unit of work) left to process,
311 * including items that are currently in progress. |worker_count| is the
312 * number of threads currently assigned to this job which some callers may
313 * need to determine their return value. Since GetMaxConcurrency() is a leaf
314 * function, it must not call back any JobHandle methods.
315 */
316 virtual size_t GetMaxConcurrency(size_t worker_count) const = 0;
317};
318
319/**
320 * A "blocking call" refers to any call that causes the calling thread to wait
321 * off-CPU. It includes but is not limited to calls that wait on synchronous
322 * file I/O operations: read or write a file from disk, interact with a pipe or
323 * a socket, rename or delete a file, enumerate files in a directory, etc.
324 * Acquiring a low contention lock is not considered a blocking call.
325 */
326
327/**
328 * BlockingType indicates the likelihood that a blocking call will actually
329 * block.
330 */
331enum class BlockingType {
332 // The call might block (e.g. file I/O that might hit in memory cache).
333 kMayBlock,
334 // The call will definitely block (e.g. cache already checked and now pinging
335 // server synchronously).
336 kWillBlock
337};
338
339/**
340 * This class is instantiated with CreateBlockingScope() in every scope where a
341 * blocking call is made and serves as a precise annotation of the scope that
342 * may/will block. May be implemented by an embedder to adjust the thread count.
343 * CPU usage should be minimal within that scope. ScopedBlockingCalls can be
344 * nested.
345 */
346class ScopedBlockingCall {
347 public:
348 virtual ~ScopedBlockingCall() = default;
349};
350
351/**
352 * The interface represents complex arguments to trace events.
353 */
354class ConvertableToTraceFormat {
355 public:
356 virtual ~ConvertableToTraceFormat() = default;
357
358 /**
359 * Append the class info to the provided |out| string. The appended
360 * data must be a valid JSON object. Strings must be properly quoted, and
361 * escaped. There is no processing applied to the content after it is
362 * appended.
363 */
364 virtual void AppendAsTraceFormat(std::string* out) const = 0;
365};
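
For illustration only, a trace argument type (the PointArg name is invented for this sketch) would serialize itself as a JSON object in AppendAsTraceFormat():

#include <string>
#include "v8-platform.h"

class PointArg final : public v8::ConvertableToTraceFormat {
 public:
  PointArg(int x, int y) : x_(x), y_(y) {}

  void AppendAsTraceFormat(std::string* out) const override {
    // Must append a valid JSON object; string values would need escaping.
    *out += "{\"x\":" + std::to_string(x_) +
            ",\"y\":" + std::to_string(y_) + "}";
  }

 private:
  int x_;
  int y_;
};
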
366
367/**
368 * V8 Tracing controller.
369 *
370 * Can be implemented by an embedder to record trace events from V8.
371 *
372 * Will become obsolete in Perfetto SDK build (v8_use_perfetto = true).
373 */
374class TracingController {
375 public:
376 virtual ~TracingController() = default;
377
378 // In Perfetto mode, trace events are written using Perfetto's Track Event
379 // API directly without going through the embedder. However, it is still
380 // possible to observe tracing being enabled and disabled.
381#if !defined(V8_USE_PERFETTO)
382 /**
383 * Called by TRACE_EVENT* macros, don't call this directly.
384 * The name parameter is a category group for example:
385 * TRACE_EVENT0("v8,parse", "V8.Parse")
386 * The pointer returned points to a value with zero or more of the bits
387 * defined in CategoryGroupEnabledFlags.
388 **/
389 virtual const uint8_t* GetCategoryGroupEnabled(const char* name) {
390 static uint8_t no = 0;
391 return &no;
392 }
393
394 /**
395 * Adds a trace event to the platform tracing system. These function calls are
396 * usually the result of a TRACE_* macro from trace-event-no-perfetto.h when
397 * tracing and the category of the particular trace are enabled. It is not
398 * advisable to call these functions on their own; they are really only meant
399 * to be used by the trace macros. The returned handle can be used by
400 * UpdateTraceEventDuration to update the duration of COMPLETE events.
401 */
402 virtual uint64_t AddTraceEvent(
403 char phase, const uint8_t* category_enabled_flag, const char* name,
404 const char* scope, uint64_t id, uint64_t bind_id, int32_t num_args,
405 const char** arg_names, const uint8_t* arg_types,
406 const uint64_t* arg_values,
407 std::unique_ptr<ConvertableToTraceFormat>* arg_convertables,
408 unsigned int flags) {
409 return 0;
410 }
411 virtual uint64_t AddTraceEventWithTimestamp(
412 char phase, const uint8_t* category_enabled_flag, const char* name,
413 const char* scope, uint64_t id, uint64_t bind_id, int32_t num_args,
414 const char** arg_names, const uint8_t* arg_types,
415 const uint64_t* arg_values,
416 std::unique_ptr<ConvertableToTraceFormat>* arg_convertables,
417 unsigned int flags, int64_t timestamp) {
418 return 0;
419 }
420
421 /**
422 * Sets the duration field of a COMPLETE trace event. It must be called with
423 * the handle returned from AddTraceEvent().
424 **/
425 virtual void UpdateTraceEventDuration(const uint8_t* category_enabled_flag,
426 const char* name, uint64_t handle) {}
427#endif // !defined(V8_USE_PERFETTO)
428
429 class TraceStateObserver {
430 public:
431 virtual ~TraceStateObserver() = default;
432 virtual void OnTraceEnabled() = 0;
433 virtual void OnTraceDisabled() = 0;
434 };
435
436 /**
437 * Adds tracing state change observer.
438 * Does nothing in Perfetto SDK build (v8_use_perfetto = true).
439 */
440 virtual void AddTraceStateObserver(TraceStateObserver*) {}
441
442 /**
443 * Removes tracing state change observer.
444 * Does nothing in Perfetto SDK build (v8_use_perfetto = true).
445 */
446 virtual void RemoveTraceStateObserver(TraceStateObserver*) {}
447};
448
449/**
450 * A V8 memory page allocator.
451 *
452 * Can be implemented by an embedder to manage large host OS allocations.
453 */
454class PageAllocator {
455 public:
456 virtual ~PageAllocator() = default;
457
458 /**
459 * Gets the page granularity for AllocatePages and FreePages. Addresses and
460 * lengths for those calls should be multiples of AllocatePageSize().
461 */
462 virtual size_t AllocatePageSize() = 0;
463
464 /**
465 * Gets the page granularity for SetPermissions and ReleasePages. Addresses
466 * and lengths for those calls should be multiples of CommitPageSize().
467 */
468 virtual size_t CommitPageSize() = 0;
469
470 /**
471 * Sets the random seed so that GetRandomMmapAddr() will generate repeatable
472 * sequences of random mmap addresses.
473 */
474 virtual void SetRandomMmapSeed(int64_t seed) = 0;
475
476 /**
477 * Returns a randomized address, suitable for memory allocation under ASLR.
478 * The address will be aligned to AllocatePageSize.
479 */
480 virtual void* GetRandomMmapAddr() = 0;
481
482 /**
483 * Memory permissions.
484 */
485 enum Permission {
486 kNoAccess,
487 kRead,
488 kReadWrite,
489 kReadWriteExecute,
490 kReadExecute,
491 // Set this when reserving memory that will later require kReadWriteExecute
492 // permissions. The resulting behavior is platform-specific, currently
493 // this is used to set the MAP_JIT flag on Apple Silicon.
494 // TODO(jkummerow): Remove this when Wasm has a platform-independent
495 // w^x implementation.
496 // TODO(saelo): Remove this once all JIT pages are allocated through the
497 // VirtualAddressSpace API.
498 kNoAccessWillJitLater
499 };
500
501 /**
502 * Optional hints for AllocatePages().
503 */
504 class AllocationHint final {
505 public:
506 AllocationHint() = default;
507
508 V8_WARN_UNUSED_RESULT constexpr AllocationHint WithAddress(
509 void* address) const {
510 return AllocationHint(address, may_grow_);
511 }
512
513 V8_WARN_UNUSED_RESULT constexpr AllocationHint WithMayGrow() const {
514 return AllocationHint(address_, true);
515 }
516
517 bool MayGrow() const { return may_grow_; }
518 void* Address() const { return address_; }
519
520 private:
521 constexpr AllocationHint(void* address, bool may_grow)
522 : address_(address), may_grow_(may_grow) {}
523
524 void* address_ = nullptr;
525 bool may_grow_ = false;
526 };
527
528 /**
529 * Allocates memory in range with the given alignment and permission.
530 */
531 virtual void* AllocatePages(void* address, size_t length, size_t alignment,
532 Permission permissions) = 0;
533
534 /**
535 * Allocates memory in range with the given alignment and permission. In
536 * addition to AllocatePages it allows passing in allocation hints. The
537 * underlying implementation may not make use of hints.
538 */
539 virtual void* AllocatePages(size_t length, size_t alignment,
540 Permission permissions, AllocationHint hint) {
541 return AllocatePages(hint.Address(), length, alignment, permissions);
542 }
543
544 /**
545 * Resizes the previously allocated memory at the given address. Returns true
546 * if the allocation could be resized. Returns false if this operation is
547 * either not supported or the object could not be resized in-place.
548 */
549 virtual bool ResizeAllocationAt(void* address, size_t old_length,
550 size_t new_length, Permission permissions) {
551 return false;
552 }
553
554 /**
555 * Frees memory in a range that was allocated by a call to AllocatePages.
556 */
557 virtual bool FreePages(void* address, size_t length) = 0;
558
559 /**
560 * Releases memory in a range that was allocated by a call to AllocatePages.
561 */
562 virtual bool ReleasePages(void* address, size_t length,
563 size_t new_length) = 0;
564
565 /**
566 * Sets permissions on pages in an allocated range.
567 */
568 virtual bool SetPermissions(void* address, size_t length,
569 Permission permissions) = 0;
570
571 /**
572 * Recommits discarded pages in the given range with given permissions.
573 * Discarded pages must be recommitted with their original permissions
574 * before they are used again.
575 */
576 virtual bool RecommitPages(void* address, size_t length,
577 Permission permissions) {
578 // TODO(v8:12797): make it pure once it's implemented on Chromium side.
579 return false;
580 }
581
582 /**
583 * Frees memory in the given [address, address + size) range. address and size
584 * should be operating system page-aligned. The next write to this
585 * memory area brings the memory transparently back. This should be treated as
586 * a hint to the OS that the pages are no longer needed. It does not guarantee
587 * that the pages will be discarded immediately or at all.
588 */
589 virtual bool DiscardSystemPages(void* address, size_t size) { return true; }
590
591 /**
592 * Decommits any wired memory pages in the given range, allowing the OS to
593 * reclaim them, and marks the region as inaccessible (kNoAccess). The address
594 * range stays reserved and can be accessed again later by changing its
595 * permissions. However, in that case the memory content is guaranteed to be
596 * zero-initialized again. The memory must have been previously allocated by a
597 * call to AllocatePages. Returns true on success, false otherwise.
598 */
599 virtual bool DecommitPages(void* address, size_t size) = 0;
600
601 /**
602 * Block any modifications to the given mapping such as changing permissions
603 * or unmapping the pages on supported platforms.
604 * The address space reservation will exist until the process ends, but it's
605 * possible to release the memory using DiscardSystemPages. Note that this
606 * might require write permissions to the page as e.g. on Linux, mseal will
607 * block discarding sealed anonymous memory.
608 */
609 virtual bool SealPages(void* address, size_t length) {
610 // TODO(360048056): make it pure once it's implemented on Chromium side.
611 return false;
612 }
613
614 /**
615 * INTERNAL ONLY: This interface has not been stabilised and may change
616 * without notice from one release to another without being deprecated first.
617 */
618 class SharedMemoryMapping {
619 public:
620 // Implementations are expected to free the shared memory mapping in the
621 // destructor.
622 virtual ~SharedMemoryMapping() = default;
623 virtual void* GetMemory() const = 0;
624 };
625
626 /**
627 * INTERNAL ONLY: This interface has not been stabilised and may change
628 * without notice from one release to another without being deprecated first.
629 */
630 class SharedMemory {
631 public:
632 // Implementations are expected to free the shared memory in the destructor.
633 virtual ~SharedMemory() = default;
634 virtual std::unique_ptr<SharedMemoryMapping> RemapTo(
635 void* new_address) const = 0;
636 virtual void* GetMemory() const = 0;
637 virtual size_t GetSize() const = 0;
638 };
639
640 /**
641 * INTERNAL ONLY: This interface has not been stabilised and may change
642 * without notice from one release to another without being deprecated first.
643 *
644 * Reserve pages at a fixed address returning whether the reservation is
645 * possible. The reserved memory is detached from the PageAllocator and so
646 * should not be freed by it. It's intended for use with
647 * SharedMemory::RemapTo, where ~SharedMemoryMapping would free the memory.
648 */
649 virtual bool ReserveForSharedMemoryMapping(void* address, size_t size) {
650 return false;
651 }
652
653 /**
654 * INTERNAL ONLY: This interface has not been stabilised and may change
655 * without notice from one release to another without being deprecated first.
656 *
657 * Allocates shared memory pages. Not all PageAllocators need support this and
658 * so this method need not be overridden.
659 * Allocates a new read-only shared memory region of size |length| and copies
660 * the memory at |original_address| into it.
661 */
662 virtual std::unique_ptr<SharedMemory> AllocateSharedPages(
663 size_t length, const void* original_address) {
664 return {};
665 }
666
667 /**
668 * INTERNAL ONLY: This interface has not been stabilised and may change
669 * without notice from one release to another without being deprecated first.
670 *
671 * If not overridden and changed to return true, V8 will not attempt to call
672 * AllocateSharedPages or RemapSharedPages. If overridden, AllocateSharedPages
673 * and RemapSharedPages must also be overridden.
674 */
675 virtual bool CanAllocateSharedPages() { return false; }
676};
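
A sketch of the common reserve-then-commit pattern against this interface (the allocator is assumed to come from Platform::GetPageAllocator(); error handling is minimal and the helper name is invented):

#include <cstddef>
#include "v8-platform.h"

void* ReserveAndCommit(v8::PageAllocator* allocator, size_t length) {
  const size_t granularity = allocator->AllocatePageSize();
  const size_t size = ((length + granularity - 1) / granularity) * granularity;
  // Reserve address space with no access rights first.
  void* region = allocator->AllocatePages(allocator->GetRandomMmapAddr(), size,
                                          granularity,
                                          v8::PageAllocator::kNoAccess);
  if (region == nullptr) return nullptr;
  // Then commit it read/write (size is assumed to also be a multiple of
  // CommitPageSize(), which is the granularity SetPermissions operates on).
  if (!allocator->SetPermissions(region, size,
                                 v8::PageAllocator::kReadWrite)) {
    allocator->FreePages(region, size);
    return nullptr;
  }
  return region;  // later released with allocator->FreePages(region, size)
}
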
677
678/**
679 * An allocator that uses per-thread permissions to protect the memory.
680 *
681 * The implementation is platform/hardware specific, e.g. using pkeys on x64.
682 *
683 * INTERNAL ONLY: This interface has not been stabilised and may change
684 * without notice from one release to another without being deprecated first.
685 */
686class ThreadIsolatedAllocator {
687 public:
688 virtual ~ThreadIsolatedAllocator() = default;
689
690 virtual void* Allocate(size_t size) = 0;
691
692 virtual void Free(void* object) = 0;
693
694 enum class Type {
695 kPkey,
696 };
697
698 virtual Type Type() const = 0;
699
700 /**
701 * Return the pkey used to implement the thread isolation if Type == kPkey.
702 */
703 virtual int Pkey() const { return -1; }
704};
705
706// Opaque type representing a handle to a shared memory region.
707using PlatformSharedMemoryHandle = intptr_t;
708static constexpr PlatformSharedMemoryHandle kInvalidSharedMemoryHandle = -1;
709
710// Conversion routines from the platform-dependent shared memory identifiers
711// into the opaque PlatformSharedMemoryHandle type. These use the underlying
712// types (e.g. unsigned int) instead of the typedef'd ones (e.g. mach_port_t)
713// to avoid pulling in large OS header files into this header file. Instead,
714 // the users of these routines are expected to include the respective OS
715// headers in addition to this one.
716#if V8_OS_DARWIN
717// Convert between a shared memory handle and a mach_port_t referencing a memory
718// entry object.
719inline PlatformSharedMemoryHandle SharedMemoryHandleFromMachMemoryEntry(
720 unsigned int port) {
721 return static_cast<PlatformSharedMemoryHandle>(port);
722}
723inline unsigned int MachMemoryEntryFromSharedMemoryHandle(
724 PlatformSharedMemoryHandle handle) {
725 return static_cast<unsigned int>(handle);
726}
727#elif V8_OS_FUCHSIA
728// Convert between a shared memory handle and a zx_handle_t to a VMO.
729inline PlatformSharedMemoryHandle SharedMemoryHandleFromVMO(uint32_t handle) {
730 return static_cast<PlatformSharedMemoryHandle>(handle);
731}
732inline uint32_t VMOFromSharedMemoryHandle(PlatformSharedMemoryHandle handle) {
733 return static_cast<uint32_t>(handle);
734}
735#elif V8_OS_WIN
736// Convert between a shared memory handle and a Windows HANDLE to a file mapping
737// object.
738inline PlatformSharedMemoryHandle SharedMemoryHandleFromFileMapping(
739 void* handle) {
740 return reinterpret_cast<PlatformSharedMemoryHandle>(handle);
741}
742inline void* FileMappingFromSharedMemoryHandle(
743 PlatformSharedMemoryHandle handle) {
744 return reinterpret_cast<void*>(handle);
745}
746#else
747// Convert between a shared memory handle and a file descriptor.
748inline PlatformSharedMemoryHandle SharedMemoryHandleFromFileDescriptor(int fd) {
749 return static_cast<PlatformSharedMemoryHandle>(fd);
750}
751inline int FileDescriptorFromSharedMemoryHandle(
752 PlatformSharedMemoryHandle handle) {
753 return static_cast<int>(handle);
754}
755#endif
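
On the POSIX branch above (#else), wrapping an existing shared memory file descriptor is just a cast through the helper; a short sketch, assuming POSIX shm_open() is available on the target:

#include <fcntl.h>
#include <sys/mman.h>
#include "v8-platform.h"

v8::PlatformSharedMemoryHandle OpenSharedMemoryHandle(const char* name) {
  int fd = shm_open(name, O_RDONLY, 0);  // POSIX shared memory object
  if (fd < 0) return v8::kInvalidSharedMemoryHandle;
  return v8::SharedMemoryHandleFromFileDescriptor(fd);
}
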
756
757/**
758 * Possible permissions for memory pages.
759 */
760enum class PagePermissions {
761 kNoAccess,
762 kRead,
763 kReadWrite,
764 kReadWriteExecute,
765 kReadExecute,
766};
767
768/**
769 * Class to manage a virtual memory address space.
770 *
771 * This class represents a contiguous region of virtual address space in which
772 * sub-spaces and (private or shared) memory pages can be allocated, freed, and
773 * modified. This interface is meant to eventually replace the PageAllocator
774 * interface, and can be used as an alternative in the meantime.
775 *
776 * This API is not yet stable and may change without notice!
777 */
778class VirtualAddressSpace {
779 public:
780 using Address = uintptr_t;
781
782 VirtualAddressSpace(size_t page_size, size_t allocation_granularity,
783 Address base, size_t size,
784 PagePermissions max_page_permissions)
785 : page_size_(page_size),
786 allocation_granularity_(allocation_granularity),
787 base_(base),
788 size_(size),
789 max_page_permissions_(max_page_permissions) {}
790
791 virtual ~VirtualAddressSpace() = default;
792
793 /**
794 * The page size used inside this space. Guaranteed to be a power of two.
795 * Used as granularity for all page-related operations except for allocation,
796 * which uses the allocation_granularity(), see below.
797 *
798 * \returns the page size in bytes.
799 */
800 size_t page_size() const { return page_size_; }
801
802 /**
803 * The granularity of page allocations and, by extension, of subspace
804 * allocations. This is guaranteed to be a power of two and a multiple of the
805 * page_size(). In practice, this is equal to the page size on most OSes, but
806 * on Windows it is usually 64KB, while the page size is 4KB.
807 *
808 * \returns the allocation granularity in bytes.
809 */
810 size_t allocation_granularity() const { return allocation_granularity_; }
811
812 /**
813 * The base address of the address space managed by this instance.
814 *
815 * \returns the base address of this address space.
816 */
817 Address base() const { return base_; }
818
819 /**
820 * The size of the address space managed by this instance.
821 *
822 * \returns the size of this address space in bytes.
823 */
824 size_t size() const { return size_; }
825
826 /**
827 * The maximum page permissions that pages allocated inside this space can
828 * obtain.
829 *
830 * \returns the maximum page permissions.
831 */
832 PagePermissions max_page_permissions() const { return max_page_permissions_; }
833
834 /**
835 * Whether the |address| is inside the address space managed by this instance.
836 *
837 * \returns true if it is inside the address space, false if not.
838 */
839 bool Contains(Address address) const {
840 return (address >= base()) && (address < base() + size());
841 }
842
843 /**
844 * Sets the random seed so that GetRandomPageAddress() will generate
845 * repeatable sequences of random addresses.
846 *
847 * \param seed The seed for the PRNG.
848 */
849 virtual void SetRandomSeed(int64_t seed) = 0;
850
851 /**
852 * Returns a random address inside this address space, suitable for page
853 * allocation hints.
854 *
855 * \returns a random address aligned to allocation_granularity().
856 */
857 virtual Address RandomPageAddress() = 0;
858
859 /**
860 * Allocates private memory pages with the given alignment and permissions.
861 *
862 * \param hint If nonzero, the allocation is attempted to be placed at the
863 * given address first. If that fails, the allocation is attempted to be
864 * placed elsewhere, possibly nearby, but that is not guaranteed. Specifying
865 * zero for the hint always causes this function to choose a random address.
866 * The hint, if specified, must be aligned to the specified alignment.
867 *
868 * \param size The size of the allocation in bytes. Must be a multiple of the
869 * allocation_granularity().
870 *
871 * \param alignment The alignment of the allocation in bytes. Must be a
872 * multiple of the allocation_granularity() and should be a power of two.
873 *
874 * \param permissions The page permissions of the newly allocated pages.
875 *
876 * \returns the start address of the allocated pages on success, zero on
877 * failure.
878 */
879 static constexpr Address kNoHint = 0;
880 virtual V8_WARN_UNUSED_RESULT Address
881 AllocatePages(Address hint, size_t size, size_t alignment,
882 PagePermissions permissions) = 0;
883
884 /**
885 * Frees previously allocated pages.
886 *
887 * This function will terminate the process on failure as this implies a bug
888 * in the client. As such, there is no return value.
889 *
890 * \param address The start address of the pages to free. This address must
891 * have been obtained through a call to AllocatePages.
892 *
893 * \param size The size in bytes of the region to free. This must match the
894 * size passed to AllocatePages when the pages were allocated.
895 */
896 virtual void FreePages(Address address, size_t size) = 0;
897
898 /**
899 * Sets permissions of all allocated pages in the given range.
900 *
901 * This operation can fail due to OOM, in which case false is returned. If
902 * the operation fails for a reason other than OOM, this function will
903 * terminate the process as this implies a bug in the client.
904 *
905 * \param address The start address of the range. Must be aligned to
906 * page_size().
907 *
908 * \param size The size in bytes of the range. Must be a multiple
909 * of page_size().
910 *
911 * \param permissions The new permissions for the range.
912 *
913 * \returns true on success, false on OOM.
914 */
915 virtual V8_WARN_UNUSED_RESULT bool SetPagePermissions(
916 Address address, size_t size, PagePermissions permissions) = 0;
917
918 /**
919 * Creates a guard region at the specified address.
920 *
921 * Guard regions are guaranteed to cause a fault when accessed and generally
922 * do not count towards any memory consumption limits. Further, allocating
923 * guard regions can usually not fail in subspaces if the region does not
924 * overlap with another region, subspace, or page allocation.
925 *
926 * \param address The start address of the guard region. Must be aligned to
927 * the allocation_granularity().
928 *
929 * \param size The size of the guard region in bytes. Must be a multiple of
930 * the allocation_granularity().
931 *
932 * \returns true on success, false otherwise.
933 */
934 virtual V8_WARN_UNUSED_RESULT bool AllocateGuardRegion(Address address,
935 size_t size) = 0;
936
937 /**
938 * Frees an existing guard region.
939 *
940 * This function will terminate the process on failure as this implies a bug
941 * in the client. As such, there is no return value.
942 *
943 * \param address The start address of the guard region to free. This address
944 * must have previously been used as address parameter in a successful
945 * invocation of AllocateGuardRegion.
946 *
947 * \param size The size in bytes of the guard region to free. This must match
948 * the size passed to AllocateGuardRegion when the region was created.
949 */
950 virtual void FreeGuardRegion(Address address, size_t size) = 0;
951
952 /**
953 * Allocates shared memory pages with the given permissions.
954 *
955 * \param hint Placement hint. See AllocatePages.
956 *
957 * \param size The size of the allocation in bytes. Must be a multiple of the
958 * allocation_granularity().
959 *
960 * \param permissions The page permissions of the newly allocated pages.
961 *
962 * \param handle A platform-specific handle to a shared memory object. See
963 * the SharedMemoryHandleFromX routines above for ways to obtain these.
964 *
965 * \param offset The offset in the shared memory object at which the mapping
966 * should start. Must be a multiple of the allocation_granularity().
967 *
968 * \returns the start address of the allocated pages on success, zero on
969 * failure.
970 */
971 virtual V8_WARN_UNUSED_RESULT Address
972 AllocateSharedPages(Address hint, size_t size, PagePermissions permissions,
973 PlatformSharedMemoryHandle handle, uint64_t offset) = 0;
974
975 /**
976 * Frees previously allocated shared pages.
977 *
978 * This function will terminate the process on failure as this implies a bug
979 * in the client. As such, there is no return value.
980 *
981 * \param address The start address of the pages to free. This address must
982 * have been obtained through a call to AllocateSharedPages.
983 *
984 * \param size The size in bytes of the region to free. This must match the
985 * size passed to AllocateSharedPages when the pages were allocated.
986 */
987 virtual void FreeSharedPages(Address address, size_t size) = 0;
988
989 /**
990 * Memory protection key support.
991 *
992 * If supported by the hardware and operating system, virtual address spaces
993 * can use memory protection keys in addition to the regular page
994 * permissions. The MemoryProtectionKeyId type identifies a memory protection
995 * key and is used by the related APIs in this class.
996 *
997 * TODO(saelo): consider renaming to just MemoryProtectionKey, but currently
998 * there's a naming conflict with base::MemoryProtectionKey.
999 */
1000 using MemoryProtectionKeyId = int;
1001
1002 /**
1003 * The memory protection key used by this space, if any.
1004 *
1005 * If this space uses a memory protection key, then all memory pages in it
1006 * will have this key set. In that case, this API will return that key.
1007 *
1008 * \returns the memory protection key used by this space or std::nullopt.
1009 */
1010 virtual std::optional<MemoryProtectionKeyId> ActiveMemoryProtectionKey() = 0;
1011
1012 /**
1013 * Whether this instance can allocate subspaces or not.
1014 *
1015 * \returns true if subspaces can be allocated, false if not.
1016 */
1017 virtual bool CanAllocateSubspaces() = 0;
1018
1019 /*
1020 * Allocate a subspace.
1021 *
1022 * The address space of a subspace stays reserved in the parent space for the
1023 * lifetime of the subspace. As such, it is guaranteed that page allocations
1024 * on the parent space cannot end up inside a subspace.
1025 *
1026 * \param hint Hints where the subspace should be allocated. See
1027 * AllocatePages() for more details.
1028 *
1029 * \param size The size in bytes of the subspace. Must be a multiple of the
1030 * allocation_granularity().
1031 *
1032 * \param alignment The alignment of the subspace in bytes. Must be a multiple
1033 * of the allocation_granularity() and should be a power of two.
1034 *
1035 * \param max_page_permissions The maximum permissions that pages allocated in
1036 * the subspace can obtain.
1037 *
1038 * \param key Optional memory protection key for the subspace. If used, the
1039 * returned subspace will use this key for all its memory pages.
1040 *
1041 * \returns a new subspace or nullptr on failure.
1042 */
1043 virtual std::unique_ptr<VirtualAddressSpace> AllocateSubspace(
1044 Address hint, size_t size, size_t alignment,
1045 PagePermissions max_page_permissions,
1046 std::optional<MemoryProtectionKeyId> key = std::nullopt) = 0;
1047
1048 //
1049 // TODO(v8) maybe refactor the methods below before stabilizing the API. For
1050 // example by combining them into some form of page operation method that
1051 // takes a command enum as parameter.
1052 //
1053
1054 /**
1055 * Recommits discarded pages in the given range with given permissions.
1056 * Discarded pages must be recommitted with their original permissions
1057 * before they are used again.
1058 *
1059 * \param address The start address of the range. Must be aligned to
1060 * page_size().
1061 *
1062 * \param size The size in bytes of the range. Must be a multiple
1063 * of page_size().
1064 *
1065 * \param permissions The permissions for the range that the pages must have.
1066 *
1067 * \returns true on success, false otherwise.
1068 */
1069 virtual V8_WARN_UNUSED_RESULT bool RecommitPages(
1070 Address address, size_t size, PagePermissions permissions) = 0;
1071
1072 /**
1073 * Frees memory in the given [address, address + size) range. address and
1074 * size should be aligned to the page_size(). The next write to this memory
1075 * area brings the memory transparently back. This should be treated as a
1076 * hint to the OS that the pages are no longer needed. It does not guarantee
1077 * that the pages will be discarded immediately or at all.
1078 *
1079 * \returns true on success, false otherwise. Since this method is only a
1080 * hint, a successful invocation does not imply that pages have been removed.
1081 */
1082 virtual V8_WARN_UNUSED_RESULT bool DiscardSystemPages(Address address,
1083 size_t size) {
1084 return true;
1085 }
1086 /**
1087 * Decommits any wired memory pages in the given range, allowing the OS to
1088 * reclaim them, and marks the region as inaccessible (kNoAccess). The address
1089 * range stays reserved and can be accessed again later by changing its
1090 * permissions. However, in that case the memory content is guaranteed to be
1091 * zero-initialized again. The memory must have been previously allocated by a
1092 * call to AllocatePages.
1093 *
1094 * \returns true on success, false otherwise.
1095 */
1096 virtual V8_WARN_UNUSED_RESULT bool DecommitPages(Address address,
1097 size_t size) = 0;
1098
1099 private:
1100 const size_t page_size_;
1101 const size_t allocation_granularity_;
1102 const Address base_;
1103 const size_t size_;
1104 const PagePermissions max_page_permissions_;
1105};
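
A sketch of allocating read/write pages in a VirtualAddressSpace supplied by the embedder (the helper name is invented; the size is rounded up to allocation_granularity() and kNoHint lets the implementation pick a random placement):

#include <cstddef>
#include "v8-platform.h"

v8::VirtualAddressSpace::Address AllocateReadWrite(
    v8::VirtualAddressSpace* space, size_t bytes) {
  const size_t granularity = space->allocation_granularity();
  const size_t size = ((bytes + granularity - 1) / granularity) * granularity;
  // Returns zero on failure; the caller eventually calls
  // space->FreePages(address, size) with the same size.
  return space->AllocatePages(v8::VirtualAddressSpace::kNoHint, size,
                              granularity, v8::PagePermissions::kReadWrite);
}
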
1106
1107/**
1108 * Observer used by V8 to notify the embedder about entering/leaving sections
1109 * with high throughput of malloc/free operations.
1110 */
1111class HighAllocationThroughputObserver {
1112 public:
1113 virtual void EnterSection() {}
1114 virtual void LeaveSection() {}
1115};
1116
1117/**
1118 * V8 Platform abstraction layer.
1119 *
1120 * The embedder has to provide an implementation of this interface before
1121 * initializing the rest of V8.
1122 */
1123class Platform {
1124 public:
1125 virtual ~Platform() = default;
1126
1127 /**
1128 * Allows the embedder to manage memory page allocations.
1129 * Returning nullptr will cause V8 to use the default page allocator.
1130 */
1131 virtual PageAllocator* GetPageAllocator() { return nullptr; }
1132
1133 /**
1134 * Allows the embedder to provide an allocator that uses per-thread memory
1135 * permissions to protect allocations.
1136 * Returning nullptr will cause V8 to disable protections that rely on this
1137 * feature.
1138 */
1139 virtual ThreadIsolatedAllocator* GetThreadIsolatedAllocator() {
1140 return nullptr;
1141 }
1142
1143 /**
1144 * Enables the embedder to respond in cases where V8 can't allocate large
1145 * blocks of memory. V8 retries the failed allocation once after calling this
1146 * method. On success, execution continues; otherwise V8 exits with a fatal
1147 * error.
1148 * Embedder overrides of this function must NOT call back into V8.
1149 */
1150 virtual void OnCriticalMemoryPressure() {}
1151
1152 /**
1153 * Gets the max number of worker threads that may be used to execute
1154 * concurrent work scheduled for any single TaskPriority by
1155 * Call(BlockingTask)OnWorkerThread() or PostJob(). This can be used to
1156 * estimate the number of tasks a work package should be split into. A return
1157 * value of 0 means that there are no worker threads available. Note that a
1158 * value of 0 won't prohibit V8 from posting tasks using |CallOnWorkerThread|.
1159 */
1160 virtual int NumberOfWorkerThreads() = 0;
1161
1162 /**
1163 * Returns a TaskRunner which can be used to post a task on the foreground.
1164 * The TaskRunner's NonNestableTasksEnabled() must be true. This function
1165 * should only be called from a foreground thread.
1166 */
1167 std::shared_ptr<v8::TaskRunner> GetForegroundTaskRunner(Isolate* isolate) {
1168 return GetForegroundTaskRunner(isolate, TaskPriority::kUserBlocking);
1169 }
1170
1171 /**
1172 * Returns a TaskRunner with a specific |priority| which can be used to post a
1173 * task on the foreground thread. The TaskRunner's NonNestableTasksEnabled()
1174 * must be true. This function should only be called from a foreground thread.
1175 */
1176 virtual std::shared_ptr<v8::TaskRunner> GetForegroundTaskRunner(
1177 Isolate* isolate, TaskPriority priority) = 0;
1178
1179 /**
1180 * Schedules a task to be invoked on a worker thread.
1181 * Embedders should override PostTaskOnWorkerThreadImpl() instead of
1182 * CallOnWorkerThread().
1183 */
1184 V8_DEPRECATE_SOON("Use PostTaskOnWorkerThread instead.")
1185 void CallOnWorkerThread(std::unique_ptr<Task> task,
1186 SourceLocation location = SourceLocation::Current()) {
1187 PostTaskOnWorkerThreadImpl(TaskPriority::kUserVisible, std::move(task),
1188 location);
1189 }
1190
1191 /**
1192 * Schedules a task that blocks the main thread to be invoked with
1193 * high-priority on a worker thread.
1194 * Embedders should override PostTaskOnWorkerThreadImpl() instead of
1195 * CallBlockingTaskOnWorkerThread().
1196 */
1197 V8_DEPRECATE_SOON("Use PostTaskOnWorkerThread instead.")
1198 void CallBlockingTaskOnWorkerThread(
1199 std::unique_ptr<Task> task,
1200 SourceLocation location = SourceLocation::Current()) {
1201 // Embedders may optionally override this to process these tasks in a high
1202 // priority pool.
1203 PostTaskOnWorkerThreadImpl(TaskPriority::kUserBlocking, std::move(task),
1204 location);
1205 }
1206
1207 /**
1208 * Schedules a task to be invoked with low-priority on a worker thread.
1209 * Embedders should override PostTaskOnWorkerThreadImpl() instead of
1210 * CallLowPriorityTaskOnWorkerThread().
1211 */
1212 V8_DEPRECATE_SOON("Use PostTaskOnWorkerThread instead.")
1213 void CallLowPriorityTaskOnWorkerThread(
1214 std::unique_ptr<Task> task,
1215 SourceLocation location = SourceLocation::Current()) {
1216 // Embedders may optionally override this to process these tasks in a low
1217 // priority pool.
1218 PostTaskOnWorkerThreadImpl(TaskPriority::kBestEffort, std::move(task),
1219 location);
1220 }
1221
1222 /**
1223 * Schedules a task to be invoked on a worker thread after |delay_in_seconds|
1224 * expires.
1225 * Embedders should override PostDelayedTaskOnWorkerThreadImpl() instead of
1226 * CallDelayedOnWorkerThread().
1227 */
1228 V8_DEPRECATE_SOON("Use PostDelayedTaskOnWorkerThread instead.")
1229 void CallDelayedOnWorkerThread(
1230 std::unique_ptr<Task> task, double delay_in_seconds,
1231 SourceLocation location = SourceLocation::Current()) {
1232 PostDelayedTaskOnWorkerThreadImpl(TaskPriority::kUserVisible,
1233 std::move(task), delay_in_seconds,
1234 location);
1235 }
1236
1237 /**
1238 * Schedules a task to be invoked on a worker thread.
1239 * Embedders should override PostTaskOnWorkerThreadImpl() instead of
1240 * PostTaskOnWorkerThread().
1241 */
1242 void PostTaskOnWorkerThread(
1243 TaskPriority priority, std::unique_ptr<Task> task,
1244 SourceLocation location = SourceLocation::Current()) {
1245 PostTaskOnWorkerThreadImpl(priority, std::move(task), location);
1246 }
1247
1248 /**
1249 * Schedules a task to be invoked on a worker thread after |delay_in_seconds|
1250 * expires.
1251 * Embedders should override PostDelayedTaskOnWorkerThreadImpl() instead of
1252 * PostDelayedTaskOnWorkerThread().
1253 */
1254 void PostDelayedTaskOnWorkerThread(
1255 TaskPriority priority, std::unique_ptr<Task> task,
1256 double delay_in_seconds,
1257 SourceLocation location = SourceLocation::Current()) {
1258 PostDelayedTaskOnWorkerThreadImpl(priority, std::move(task),
1259 delay_in_seconds, location);
1260 }
1261
1262 /**
1263 * Returns true if idle tasks are enabled for the given |isolate|.
1264 */
1265 virtual bool IdleTasksEnabled(Isolate* isolate) { return false; }
1266
1267 /**
1268 * Posts |job_task| to run in parallel. Returns a JobHandle associated with
1269 * the Job, which can be joined or canceled.
1270 * This avoids degenerate cases:
1271 * - Calling CallOnWorkerThread() for each work item, causing significant
1272 * overhead.
1273 * - Fixed number of CallOnWorkerThread() calls that split the work and might
1274 * run for a long time. This is problematic when many components post
1275 * "num cores" tasks and all expect to use all the cores. In these cases,
1276 * the scheduler lacks context to be fair to multiple same-priority requests
1277 * and/or ability to request lower priority work to yield when high priority
1278 * work comes in.
1279 * A canonical implementation of |job_task| looks like:
1280 * class MyJobTask : public JobTask {
1281 * public:
1282 * MyJobTask(...) : worker_queue_(...) {}
1283 * // JobTask:
1284 * void Run(JobDelegate* delegate) override {
1285 * while (!delegate->ShouldYield()) {
1286 * // Smallest unit of work.
1287 * auto work_item = worker_queue_.TakeWorkItem(); // Thread safe.
1288 * if (!work_item) return;
1289 * ProcessWork(work_item);
1290 * }
1291 * }
1292 *
1293 * size_t GetMaxConcurrency(size_t) const override {
1294 * return worker_queue_.GetSize(); // Thread safe.
1295 * }
1296 * };
1297 * auto handle = PostJob(TaskPriority::kUserVisible,
1298 * std::make_unique<MyJobTask>(...));
1299 * handle->Join();
1300 *
1301 * PostJob() and methods of the returned JobHandle/JobDelegate, must never be
1302 * called while holding a lock that could be acquired by JobTask::Run or
1303 * JobTask::GetMaxConcurrency -- that could result in a deadlock. This is
1304 * because [1] JobTask::GetMaxConcurrency may be invoked while holding
1305 * internal lock (A), hence JobTask::GetMaxConcurrency can only use a lock (B)
1306 * if that lock is *never* held while calling back into JobHandle from any
1307 * thread (A=>B/B=>A deadlock) and [2] JobTask::Run or
1308 * JobTask::GetMaxConcurrency may be invoked synchronously from JobHandle
1309 * (B=>JobHandle::foo=>B deadlock).
1310 * Embedders should override CreateJobImpl() instead of PostJob().
1311 */
1312 std::unique_ptr<JobHandle> PostJob(
1313 TaskPriority priority, std::unique_ptr<JobTask> job_task,
1314 SourceLocation location = SourceLocation::Current()) {
1315 auto handle = CreateJob(priority, std::move(job_task), location);
1316 handle->NotifyConcurrencyIncrease();
1317 return handle;
1318 }
1319
1320 /**
1321 * Creates and returns a JobHandle associated with a Job. Unlike PostJob(),
1322 * this doesn't immediately schedule |job_task| to run; the Job is then
1323 * scheduled by calling either NotifyConcurrencyIncrease() or Join().
1324 *
1325 * A sufficient CreateJob() implementation that uses the default Job provided
1326 * in libplatform looks like:
1327 * std::unique_ptr<JobHandle> CreateJob(
1328 * TaskPriority priority, std::unique_ptr<JobTask> job_task) override {
1329 * return v8::platform::NewDefaultJobHandle(
1330 * this, priority, std::move(job_task), NumberOfWorkerThreads());
1331 * }
1332 *
1333 * Embedders should override CreateJobImpl() instead of CreateJob().
1334 */
1335 std::unique_ptr<JobHandle> CreateJob(
1336 TaskPriority priority, std::unique_ptr<JobTask> job_task,
1337 SourceLocation location = SourceLocation::Current()) {
1338 return CreateJobImpl(priority, std::move(job_task), location);
1339 }
1340
1341 /**
1342 * Instantiates a ScopedBlockingCall to annotate a scope that may/will block.
1343 */
1344 virtual std::unique_ptr<ScopedBlockingCall> CreateBlockingScope(
1345 BlockingType blocking_type) {
1346 return nullptr;
1347 }
1348
1349 /**
1350 * Monotonically increasing time in seconds from an arbitrary fixed point in
1351 * the past. This function is expected to return at least
1352 * millisecond-precision values. For this reason,
1353 * it is recommended that the fixed point be no further in the past than
1354 * the epoch.
1355 **/
1356 virtual double MonotonicallyIncreasingTime() = 0;
1357
1358 /**
1359 * Current wall-clock time in milliseconds since epoch. Use
1360 * CurrentClockTimeMillisecondsHighResolution() when higher precision is
1361 * required.
1362 */
1363 virtual int64_t CurrentClockTimeMilliseconds() {
1364 return static_cast<int64_t>(floor(CurrentClockTimeMillis()));
1365 }
1366
1367 /**
1368 * This function is deprecated and will be deleted. Use either
1369 * CurrentClockTimeMilliseconds() or
1370 * CurrentClockTimeMillisecondsHighResolution().
1371 */
1372 virtual double CurrentClockTimeMillis() = 0;
1373
1374 /**
1375 * Same as CurrentClockTimeMilliseconds(), but with more precision.
1376 */
1377 virtual double CurrentClockTimeMillisecondsHighResolution() {
1378 return CurrentClockTimeMillis();
1379 }
1380
1381 typedef void (*StackTracePrinter)();
1382
1383 /**
1384 * Returns a function pointer that prints a stack trace of the current stack
1385 * on invocation. Returning nullptr disables printing of the stack trace.
1386 */
1387 virtual StackTracePrinter GetStackTracePrinter() { return nullptr; }
1388
1389 /**
1390 * Returns an instance of a v8::TracingController. This must be non-nullptr.
1391 */
1392 virtual TracingController* GetTracingController() = 0;
1393
1394 /**
1395 * Tells the embedder to generate and upload a crashdump during an unexpected
1396 * but non-critical scenario.
1397 */
1398 virtual void DumpWithoutCrashing() {}
1399
1400 /**
1401 * Allows the embedder to observe sections with high throughput allocation
1402 * operations.
1403 */
1404 virtual HighAllocationThroughputObserver*
1405 GetHighAllocationThroughputObserver() {
1406 static HighAllocationThroughputObserver default_observer;
1407 return &default_observer;
1408 }
1409
1410 protected:
1411 /**
1412 * Default implementation of current wall-clock time in milliseconds
1413 * since epoch. Useful for implementing |CurrentClockTimeMillis| if
1414 * nothing special needed.
1415 */
1416 V8_EXPORT static double SystemClockTimeMillis();
1417
1418 /**
1419 * Creates and returns a JobHandle associated with a Job.
1420 */
1421 virtual std::unique_ptr<JobHandle> CreateJobImpl(
1422 TaskPriority priority, std::unique_ptr<JobTask> job_task,
1423 const SourceLocation& location) = 0;
1424
1425 /**
1426 * Schedules a task with |priority| to be invoked on a worker thread.
1427 */
1428 virtual void PostTaskOnWorkerThreadImpl(TaskPriority priority,
1429 std::unique_ptr<Task> task,
1430 const SourceLocation& location) = 0;
1431
1432 /**
1433 * Schedules a task with |priority| to be invoked on a worker thread after
1434 * |delay_in_seconds| expires.
1435 */
1436 virtual void PostDelayedTaskOnWorkerThreadImpl(
1437 TaskPriority priority, std::unique_ptr<Task> task,
1438 double delay_in_seconds, const SourceLocation& location) = 0;
1439};
1440
1441} // namespace v8
1442
1443#endif // V8_V8_PLATFORM_H_
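
Most embedders do not implement Platform by hand; the usual wiring uses the default implementation shipped with libplatform. A minimal sketch (the APIs below come from include/libplatform/libplatform.h and v8.h, not from this header):

#include <memory>

#include <libplatform/libplatform.h>
#include <v8.h>

int main() {
  std::unique_ptr<v8::Platform> platform = v8::platform::NewDefaultPlatform();
  v8::V8::InitializePlatform(platform.get());
  v8::V8::Initialize();
  // ... create isolates, post tasks, run scripts ...
  v8::V8::Dispose();
  v8::V8::DisposePlatform();
  return 0;
}
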