// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_V8_PLATFORM_H_
#define V8_V8_PLATFORM_H_

#include <math.h>
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>  // For abort.

#include <memory>
#include <string>

#include "v8-source-location.h"  // NOLINT(build/include_directory)
#include "v8config.h"  // NOLINT(build/include_directory)

namespace v8 {

class Isolate;

// Valid priorities supported by the task scheduling infrastructure.
enum class TaskPriority : uint8_t {
  /**
   * Best effort tasks are not critical for performance of the application. The
   * platform implementation should preempt such tasks if higher priority tasks
   * arrive.
   */
  kBestEffort,
  /**
   * User visible tasks are long running background tasks that will
   * improve performance and memory usage of the application upon completion.
   * Example: background compilation and garbage collection.
   */
  kUserVisible,
  /**
   * User blocking tasks are highest priority tasks that block the execution
   * thread (e.g. major garbage collection). They must be finished as soon as
   * possible.
   */
  kUserBlocking,
  kMaxPriority = kUserBlocking
};

/**
 * A Task represents a unit of work.
 */
class Task {
 public:
  virtual ~Task() = default;

  virtual void Run() = 0;
};
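
// A minimal sketch (not part of the V8 API) of an embedder-defined Task. The
// class name and the printed message are hypothetical; only the Task
// interface above and <cstdio> are assumed.
//
//   class HelloTask : public v8::Task {
//    public:
//     void Run() override { std::printf("Hello from a V8 task\n"); }
//   };
//
//   // Posted through any TaskRunner, which takes ownership:
//   task_runner->PostTask(std::make_unique<HelloTask>());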

/**
 * An IdleTask represents a unit of work to be performed in idle time.
 * The Run method is invoked with an argument that specifies the deadline in
 * seconds returned by MonotonicallyIncreasingTime().
 * The idle task is expected to complete by this deadline.
 */
class IdleTask {
 public:
  virtual ~IdleTask() = default;
  virtual void Run(double deadline_in_seconds) = 0;
};
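
// A minimal sketch (not part of the V8 API) of an IdleTask that performs small
// work items until the deadline is reached. `HasWork`, `DoSmallWorkItem` and
// `platform` are hypothetical embedder helpers.
//
//   class CleanupIdleTask : public v8::IdleTask {
//    public:
//     void Run(double deadline_in_seconds) override {
//       // Keep working only while idle time remains.
//       while (HasWork() &&
//              platform->MonotonicallyIncreasingTime() < deadline_in_seconds) {
//         DoSmallWorkItem();
//       }
//     }
//   };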

/**
 * A TaskRunner allows scheduling of tasks. The TaskRunner may still be used to
 * post tasks after the isolate is destroyed, but these tasks may not get
 * executed anymore. All tasks posted to a given TaskRunner will be invoked in
 * sequence. Tasks can be posted from any thread.
 */
class TaskRunner {
 public:
  /**
   * Schedules a task to be invoked by this TaskRunner. The TaskRunner
   * implementation takes ownership of |task|.
   *
   * Embedders should override PostTaskImpl instead of this.
   */
  virtual void PostTask(std::unique_ptr<Task> task) {
    PostTaskImpl(std::move(task), SourceLocation::Current());
  }

  /**
   * Schedules a task to be invoked by this TaskRunner. The TaskRunner
   * implementation takes ownership of |task|. The |task| cannot be nested
   * within other task executions.
   *
   * Tasks which shouldn't be interleaved with JS execution must be posted with
   * |PostNonNestableTask| or |PostNonNestableDelayedTask|. This is because the
   * embedder may process tasks in a callback which is called during JS
   * execution.
   *
   * In particular, tasks which execute JS must be non-nestable, since JS
   * execution is not allowed to nest.
   *
   * Requires that |TaskRunner::NonNestableTasksEnabled()| is true.
   *
   * Embedders should override PostNonNestableTaskImpl instead of this.
   */
  virtual void PostNonNestableTask(std::unique_ptr<Task> task) {
    PostNonNestableTaskImpl(std::move(task), SourceLocation::Current());
  }

  /**
   * Schedules a task to be invoked by this TaskRunner. The task is scheduled
   * after the given number of seconds |delay_in_seconds|. The TaskRunner
   * implementation takes ownership of |task|.
   *
   * Embedders should override PostDelayedTaskImpl instead of this.
   */
  virtual void PostDelayedTask(std::unique_ptr<Task> task,
                               double delay_in_seconds) {
    PostDelayedTaskImpl(std::move(task), delay_in_seconds,
                        SourceLocation::Current());
  }

  /**
   * Schedules a task to be invoked by this TaskRunner. The task is scheduled
   * after the given number of seconds |delay_in_seconds|. The TaskRunner
   * implementation takes ownership of |task|. The |task| cannot be nested
   * within other task executions.
   *
   * Tasks which shouldn't be interleaved with JS execution must be posted with
   * |PostNonNestableTask| or |PostNonNestableDelayedTask|. This is because the
   * embedder may process tasks in a callback which is called during JS
   * execution.
   *
   * In particular, tasks which execute JS must be non-nestable, since JS
   * execution is not allowed to nest.
   *
   * Requires that |TaskRunner::NonNestableDelayedTasksEnabled()| is true.
   *
   * Embedders should override PostNonNestableDelayedTaskImpl instead of this.
   */
  virtual void PostNonNestableDelayedTask(std::unique_ptr<Task> task,
                                          double delay_in_seconds) {
    PostNonNestableDelayedTaskImpl(std::move(task), delay_in_seconds,
                                   SourceLocation::Current());
  }

  /**
   * Schedules an idle task to be invoked by this TaskRunner. The task is
   * scheduled when the embedder is idle. Requires that
   * |TaskRunner::IdleTasksEnabled()| is true. Idle tasks may be reordered
   * relative to other task types and may be starved for an arbitrarily long
   * time if no idle time is available. The TaskRunner implementation takes
   * ownership of |task|.
   *
   * Embedders should override PostIdleTaskImpl instead of this.
   */
  virtual void PostIdleTask(std::unique_ptr<IdleTask> task) {
    PostIdleTaskImpl(std::move(task), SourceLocation::Current());
  }

  /**
   * Returns true if idle tasks are enabled for this TaskRunner.
   */
  virtual bool IdleTasksEnabled() = 0;

  /**
   * Returns true if non-nestable tasks are enabled for this TaskRunner.
   */
  virtual bool NonNestableTasksEnabled() const { return false; }

  /**
   * Returns true if non-nestable delayed tasks are enabled for this TaskRunner.
   */
  virtual bool NonNestableDelayedTasksEnabled() const { return false; }

  TaskRunner() = default;
  virtual ~TaskRunner() = default;

  TaskRunner(const TaskRunner&) = delete;
  TaskRunner& operator=(const TaskRunner&) = delete;

 protected:
  /**
   * Implementations of the above methods with an additional `location`
   * argument.
   */
  virtual void PostTaskImpl(std::unique_ptr<Task> task,
                            const SourceLocation& location) {}
  virtual void PostNonNestableTaskImpl(std::unique_ptr<Task> task,
                                       const SourceLocation& location) {}
  virtual void PostDelayedTaskImpl(std::unique_ptr<Task> task,
                                   double delay_in_seconds,
                                   const SourceLocation& location) {}
  virtual void PostNonNestableDelayedTaskImpl(std::unique_ptr<Task> task,
                                              double delay_in_seconds,
                                              const SourceLocation& location) {}
  virtual void PostIdleTaskImpl(std::unique_ptr<IdleTask> task,
                                const SourceLocation& location) {}
};
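
// A minimal sketch (not part of the V8 API) of an embedder TaskRunner that
// forwards posted tasks to a hypothetical `EventLoop` whose Enqueue() accepts
// a std::unique_ptr<v8::Task> and runs it later. Only PostTaskImpl and the
// pure virtual IdleTasksEnabled() are shown; the delayed, non-nestable, and
// idle variants would follow the same pattern.
//
//   class LoopTaskRunner : public v8::TaskRunner {
//    public:
//     explicit LoopTaskRunner(EventLoop* loop) : loop_(loop) {}
//     bool IdleTasksEnabled() override { return false; }
//
//    private:
//     void PostTaskImpl(std::unique_ptr<v8::Task> task,
//                       const v8::SourceLocation& location) override {
//       loop_->Enqueue(std::move(task));  // The loop calls task->Run() later.
//     }
//     EventLoop* loop_;
//   };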

/**
 * Delegate that's passed to Job's worker task, providing an entry point to
 * communicate with the scheduler.
 */
class JobDelegate {
 public:
  /**
   * Returns true if this thread *must* return from the worker task on the
   * current thread ASAP. Workers should periodically invoke ShouldYield (or
   * YieldIfNeeded()) as often as is reasonable.
   * After this method has returned true, ShouldYield must not be called again.
   */
  virtual bool ShouldYield() = 0;

  /**
   * Notifies the scheduler that max concurrency was increased, and the number
   * of workers should be adjusted accordingly. See Platform::PostJob() for
   * more details.
   */
  virtual void NotifyConcurrencyIncrease() = 0;

  /**
   * Returns a task_id unique among threads currently running this job, such
   * that GetTaskId() < worker count. To achieve this, the same task_id may be
   * reused by a different thread after a worker_task returns.
   */
  virtual uint8_t GetTaskId() = 0;

  /**
   * Returns true if the current task is called from the thread currently
   * running JobHandle::Join().
   */
  virtual bool IsJoiningThread() const = 0;
};
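
// A sketch (not part of the V8 API) of the worker loop pattern JobDelegate
// enables: GetTaskId() indexes per-worker scratch state without locking, and
// ShouldYield() is polled between small work items. `work_queue_`, `scratch_`,
// and `Process` are hypothetical members of the enclosing JobTask.
//
//   void Run(v8::JobDelegate* delegate) override {
//     // Safe: GetTaskId() < worker count, so each running worker gets a slot.
//     ScratchSpace& scratch = scratch_[delegate->GetTaskId()];
//     while (!delegate->ShouldYield()) {
//       WorkItem* item = work_queue_.Pop();  // Thread safe.
//       if (!item) return;
//       Process(item, &scratch);
//     }
//   }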

/**
 * Handle returned when posting a Job. Provides methods to control execution of
 * the posted Job.
 */
class JobHandle {
 public:
  virtual ~JobHandle() = default;

  /**
   * Notifies the scheduler that max concurrency was increased, and the number
   * of workers should be adjusted accordingly. See Platform::PostJob() for
   * more details.
   */
  virtual void NotifyConcurrencyIncrease() = 0;

  /**
   * Contributes to the job on this thread. Doesn't return until all tasks have
   * completed and max concurrency becomes 0. When Join() is called and max
   * concurrency reaches 0, it should not increase again. This also promotes
   * this Job's priority to be at least as high as the calling thread's
   * priority.
   */
  virtual void Join() = 0;

  /**
   * Forces all existing workers to yield ASAP. Waits until they have all
   * returned from the Job's callback before returning.
   */
  virtual void Cancel() = 0;

  /**
   * Forces all existing workers to yield ASAP but doesn't wait for them.
   * Warning: this is dangerous if the Job's callback is bound to or has access
   * to state which may be deleted after this call.
   */
  virtual void CancelAndDetach() = 0;

  /**
   * Returns true if there's any work pending or any worker running.
   */
  virtual bool IsActive() = 0;

  /**
   * Returns true if associated with a Job and other methods may be called.
   * Returns false after Join() or Cancel() was called. This may return true
   * even if no workers are running and IsCompleted() returns true.
   */
  virtual bool IsValid() = 0;

  /**
   * Returns true if job priority can be changed.
   */
  virtual bool UpdatePriorityEnabled() const { return false; }

  /**
   * Updates this Job's priority.
   */
  virtual void UpdatePriority(TaskPriority new_priority) {}
};

/**
 * A JobTask represents work to run in parallel from Platform::PostJob().
 */
class JobTask {
 public:
  virtual ~JobTask() = default;

  virtual void Run(JobDelegate* delegate) = 0;

  /**
   * Controls the maximum number of threads calling Run() concurrently, given
   * the number of threads currently assigned to this job and executing Run().
   * Run() is only invoked if the number of threads previously running Run() was
   * less than the value returned. In general, this should return the latest
   * number of incomplete work items (smallest unit of work) left to process,
   * including items that are currently in progress. |worker_count| is the
   * number of threads currently assigned to this job, which some callers may
   * need in order to determine their return value. Since GetMaxConcurrency() is
   * a leaf function, it must not call back any JobHandle methods.
   */
  virtual size_t GetMaxConcurrency(size_t worker_count) const = 0;
};
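
// A sketch (not part of the V8 API) of a GetMaxConcurrency() implementation
// backed by an atomic work-item counter, following the contract above: the
// count includes items currently in progress and the method calls no JobHandle
// methods. `remaining_items_` is a hypothetical std::atomic<size_t> member.
//
//   size_t GetMaxConcurrency(size_t worker_count) const override {
//     // Latest number of incomplete work items, including in-progress ones.
//     return remaining_items_.load(std::memory_order_relaxed);
//   }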

/**
 * A "blocking call" refers to any call that causes the calling thread to wait
 * off-CPU. It includes but is not limited to calls that wait on synchronous
 * file I/O operations: read or write a file from disk, interact with a pipe or
 * a socket, rename or delete a file, enumerate files in a directory, etc.
 * Acquiring a low contention lock is not considered a blocking call.
 */

/**
 * BlockingType indicates the likelihood that a blocking call will actually
 * block.
 */
enum class BlockingType {
  // The call might block (e.g. file I/O that might hit in memory cache).
  kMayBlock,
  // The call will definitely block (e.g. cache already checked and now pinging
  // server synchronously).
  kWillBlock
};

/**
 * This class is instantiated with CreateBlockingScope() in every scope where a
 * blocking call is made and serves as a precise annotation of the scope that
 * may/will block. May be implemented by an embedder to adjust the thread count.
 * CPU usage should be minimal within that scope. ScopedBlockingCalls can be
 * nested.
 */
class ScopedBlockingCall {
 public:
  virtual ~ScopedBlockingCall() = default;
};
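
// A sketch (not part of the V8 API) of annotating a blocking region through
// Platform::CreateBlockingScope() (declared below). `platform`, `ReadFile`,
// `path` and `contents` are hypothetical; the scope should cover exactly the
// code that may block.
//
//   {
//     std::unique_ptr<v8::ScopedBlockingCall> scope =
//         platform->CreateBlockingScope(v8::BlockingType::kMayBlock);
//     contents = ReadFile(path);  // Synchronous file I/O may block off-CPU.
//   }  // Scope ends; the embedder may shrink its thread pool again.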

/**
 * The interface represents complex arguments to trace events.
 */
class ConvertableToTraceFormat {
 public:
  virtual ~ConvertableToTraceFormat() = default;

  /**
   * Appends the class info to the provided |out| string. The appended
   * data must be a valid JSON object. Strings must be properly quoted and
   * escaped. There is no processing applied to the content after it is
   * appended.
   */
  virtual void AppendAsTraceFormat(std::string* out) const = 0;
};
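
// A sketch (not part of the V8 API) of a ConvertableToTraceFormat that emits a
// valid, already-escaped JSON object, as required above. The class name and
// field are hypothetical.
//
//   class HeapStatsArg : public v8::ConvertableToTraceFormat {
//    public:
//     explicit HeapStatsArg(size_t used_bytes) : used_bytes_(used_bytes) {}
//     void AppendAsTraceFormat(std::string* out) const override {
//       // Appended content must already be valid JSON; no post-processing.
//       *out += "{\"used_bytes\":" + std::to_string(used_bytes_) + "}";
//     }
//
//    private:
//     size_t used_bytes_;
//   };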

/**
 * V8 Tracing controller.
 *
 * Can be implemented by an embedder to record trace events from V8.
 *
 * Will become obsolete in Perfetto SDK build (v8_use_perfetto = true).
 */
class TracingController {
 public:
  virtual ~TracingController() = default;

  // In Perfetto mode, trace events are written using Perfetto's Track Event
  // API directly without going through the embedder. However, it is still
  // possible to observe tracing being enabled and disabled.
#if !defined(V8_USE_PERFETTO)
  /**
   * Called by TRACE_EVENT* macros; don't call this directly.
   * The name parameter is a category group, for example:
   * TRACE_EVENT0("v8,parse", "V8.Parse")
   * The pointer returned points to a value with zero or more of the bits
   * defined in CategoryGroupEnabledFlags.
   */
  virtual const uint8_t* GetCategoryGroupEnabled(const char* name) {
    static uint8_t no = 0;
    return &no;
  }

  /**
   * Adds a trace event to the platform tracing system. These function calls are
   * usually the result of a TRACE_* macro from trace_event_common.h when
   * tracing and the category of the particular trace are enabled. It is not
   * advisable to call these functions on their own; they are really only meant
   * to be used by the trace macros. The returned handle can be used by
   * UpdateTraceEventDuration to update the duration of COMPLETE events.
   */
  virtual uint64_t AddTraceEvent(
      char phase, const uint8_t* category_enabled_flag, const char* name,
      const char* scope, uint64_t id, uint64_t bind_id, int32_t num_args,
      const char** arg_names, const uint8_t* arg_types,
      const uint64_t* arg_values,
      std::unique_ptr<ConvertableToTraceFormat>* arg_convertables,
      unsigned int flags) {
    return 0;
  }
  virtual uint64_t AddTraceEventWithTimestamp(
      char phase, const uint8_t* category_enabled_flag, const char* name,
      const char* scope, uint64_t id, uint64_t bind_id, int32_t num_args,
      const char** arg_names, const uint8_t* arg_types,
      const uint64_t* arg_values,
      std::unique_ptr<ConvertableToTraceFormat>* arg_convertables,
      unsigned int flags, int64_t timestamp) {
    return 0;
  }

  /**
   * Sets the duration field of a COMPLETE trace event. It must be called with
   * the handle returned from AddTraceEvent().
   */
  virtual void UpdateTraceEventDuration(const uint8_t* category_enabled_flag,
                                        const char* name, uint64_t handle) {}
#endif  // !defined(V8_USE_PERFETTO)

  class TraceStateObserver {
   public:
    virtual ~TraceStateObserver() = default;
    virtual void OnTraceEnabled() = 0;
    virtual void OnTraceDisabled() = 0;
  };

  /**
   * Adds a tracing state change observer.
   * Does nothing in Perfetto SDK build (v8_use_perfetto = true).
   */
  virtual void AddTraceStateObserver(TraceStateObserver*) {}

  /**
   * Removes a tracing state change observer.
   * Does nothing in Perfetto SDK build (v8_use_perfetto = true).
   */
  virtual void RemoveTraceStateObserver(TraceStateObserver*) {}
};

/**
 * A V8 memory page allocator.
 *
 * Can be implemented by an embedder to manage large host OS allocations.
 */
class PageAllocator {
 public:
  virtual ~PageAllocator() = default;

  /**
   * Gets the page granularity for AllocatePages and FreePages. Addresses and
   * lengths for those calls should be multiples of AllocatePageSize().
   */
  virtual size_t AllocatePageSize() = 0;

  /**
   * Gets the page granularity for SetPermissions and ReleasePages. Addresses
   * and lengths for those calls should be multiples of CommitPageSize().
   */
  virtual size_t CommitPageSize() = 0;

  /**
   * Sets the random seed so that GetRandomMmapAddr() will generate repeatable
   * sequences of random mmap addresses.
   */
  virtual void SetRandomMmapSeed(int64_t seed) = 0;

  /**
   * Returns a randomized address, suitable for memory allocation under ASLR.
   * The address will be aligned to AllocatePageSize.
   */
  virtual void* GetRandomMmapAddr() = 0;

  /**
   * Memory permissions.
   */
  enum Permission {
    kNoAccess,
    kRead,
    kReadWrite,
    kReadWriteExecute,
    kReadExecute,
    // Set this when reserving memory that will later require kReadWriteExecute
    // permissions. The resulting behavior is platform-specific, currently
    // this is used to set the MAP_JIT flag on Apple Silicon.
    // TODO(jkummerow): Remove this when Wasm has a platform-independent
    // w^x implementation.
    // TODO(saelo): Remove this once all JIT pages are allocated through the
    // VirtualAddressSpace API.
    kNoAccessWillJitLater
  };

  /**
   * Allocates memory in range with the given alignment and permission.
   */
  virtual void* AllocatePages(void* address, size_t length, size_t alignment,
                              Permission permissions) = 0;

  /**
   * Frees memory in a range that was allocated by a call to AllocatePages.
   */
  virtual bool FreePages(void* address, size_t length) = 0;

  /**
   * Releases memory in a range that was allocated by a call to AllocatePages.
   */
  virtual bool ReleasePages(void* address, size_t length,
                            size_t new_length) = 0;

  /**
   * Sets permissions on pages in an allocated range.
   */
  virtual bool SetPermissions(void* address, size_t length,
                              Permission permissions) = 0;

  /**
   * Recommits discarded pages in the given range with given permissions.
   * Discarded pages must be recommitted with their original permissions
   * before they are used again.
   */
  virtual bool RecommitPages(void* address, size_t length,
                             Permission permissions) {
    // TODO(v8:12797): make it pure once it's implemented on Chromium side.
    return false;
  }

  /**
   * Frees memory in the given [address, address + size) range. address and size
   * should be operating system page-aligned. The next write to this
   * memory area brings the memory transparently back. This should be treated as
   * a hint to the OS that the pages are no longer needed. It does not guarantee
   * that the pages will be discarded immediately or at all.
   */
  virtual bool DiscardSystemPages(void* address, size_t size) { return true; }

  /**
   * Decommits any wired memory pages in the given range, allowing the OS to
   * reclaim them, and marks the region as inaccessible (kNoAccess). The address
   * range stays reserved and can be accessed again later by changing its
   * permissions. However, in that case the memory content is guaranteed to be
   * zero-initialized again. The memory must have been previously allocated by a
   * call to AllocatePages. Returns true on success, false otherwise.
   */
  virtual bool DecommitPages(void* address, size_t size) = 0;

  /**
   * INTERNAL ONLY: This interface has not been stabilised and may change
   * without notice from one release to another without being deprecated first.
   */
  class SharedMemoryMapping {
   public:
    // Implementations are expected to free the shared memory mapping in the
    // destructor.
    virtual ~SharedMemoryMapping() = default;
    virtual void* GetMemory() const = 0;
  };

  /**
   * INTERNAL ONLY: This interface has not been stabilised and may change
   * without notice from one release to another without being deprecated first.
   */
  class SharedMemory {
   public:
    // Implementations are expected to free the shared memory in the destructor.
    virtual ~SharedMemory() = default;
    virtual std::unique_ptr<SharedMemoryMapping> RemapTo(
        void* new_address) const = 0;
    virtual void* GetMemory() const = 0;
    virtual size_t GetSize() const = 0;
  };

  /**
   * INTERNAL ONLY: This interface has not been stabilised and may change
   * without notice from one release to another without being deprecated first.
   *
   * Reserves pages at a fixed address, returning whether the reservation is
   * possible. The reserved memory is detached from the PageAllocator and so
   * should not be freed by it. It's intended for use with
   * SharedMemory::RemapTo, where ~SharedMemoryMapping would free the memory.
   */
  virtual bool ReserveForSharedMemoryMapping(void* address, size_t size) {
    return false;
  }

  /**
   * INTERNAL ONLY: This interface has not been stabilised and may change
   * without notice from one release to another without being deprecated first.
   *
   * Allocates shared memory pages. Not all PageAllocators need to support this,
   * so this method need not be overridden.
   * Allocates a new read-only shared memory region of size |length| and copies
   * the memory at |original_address| into it.
   */
  virtual std::unique_ptr<SharedMemory> AllocateSharedPages(
      size_t length, const void* original_address) {
    return {};
  }

  /**
   * INTERNAL ONLY: This interface has not been stabilised and may change
   * without notice from one release to another without being deprecated first.
   *
   * If not overridden and changed to return true, V8 will not attempt to call
   * AllocateSharedPages or RemapSharedPages. If overridden, AllocateSharedPages
   * and RemapSharedPages must also be overridden.
   */
  virtual bool CanAllocateSharedPages() { return false; }
};
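
// A sketch (not part of the V8 API) of a typical reserve/commit/free sequence
// against a PageAllocator, assuming a hypothetical `allocator` instance. Note
// the two granularities: AllocatePageSize() governs AllocatePages/FreePages,
// CommitPageSize() governs SetPermissions.
//
//   size_t size = allocator->AllocatePageSize();  // One allocation granule.
//   void* region = allocator->AllocatePages(
//       allocator->GetRandomMmapAddr(), size, allocator->AllocatePageSize(),
//       v8::PageAllocator::kNoAccess);  // Reserve without access.
//   if (region) {
//     allocator->SetPermissions(region, size,
//                               v8::PageAllocator::kReadWrite);  // Commit.
//     // ... use the memory ...
//     allocator->FreePages(region, size);
//   }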

/**
 * An allocator that uses per-thread permissions to protect the memory.
 *
 * The implementation is platform/hardware specific, e.g. using pkeys on x64.
 *
 * INTERNAL ONLY: This interface has not been stabilised and may change
 * without notice from one release to another without being deprecated first.
 */
class ThreadIsolatedAllocator {
 public:
  virtual ~ThreadIsolatedAllocator() = default;

  virtual void* Allocate(size_t size) = 0;

  virtual void Free(void* object) = 0;

  enum class Type {
    kPkey,
  };

  virtual Type Type() const = 0;

  /**
   * Returns the pkey used to implement the thread isolation if Type == kPkey.
   */
  virtual int Pkey() const { return -1; }

  /**
   * Per-thread permissions can be reset on signal handler entry. Even reading
   * ThreadIsolated memory will segfault in that case.
   * Call this function on signal handler entry to ensure that read permissions
   * are restored.
   */
  static void SetDefaultPermissionsForSignalHandler();
};

// Opaque type representing a handle to a shared memory region.
using PlatformSharedMemoryHandle = intptr_t;
static constexpr PlatformSharedMemoryHandle kInvalidSharedMemoryHandle = -1;

// Conversion routines from the platform-dependent shared memory identifiers
// into the opaque PlatformSharedMemoryHandle type. These use the underlying
// types (e.g. unsigned int) instead of the typedef'd ones (e.g. mach_port_t)
// to avoid pulling large OS header files into this header file. Instead,
// the users of these routines are expected to include the respective OS
// headers in addition to this one.
#if V8_OS_DARWIN
// Convert between a shared memory handle and a mach_port_t referencing a memory
// entry object.
inline PlatformSharedMemoryHandle SharedMemoryHandleFromMachMemoryEntry(
    unsigned int port) {
  return static_cast<PlatformSharedMemoryHandle>(port);
}
inline unsigned int MachMemoryEntryFromSharedMemoryHandle(
    PlatformSharedMemoryHandle handle) {
  return static_cast<unsigned int>(handle);
}
#elif V8_OS_FUCHSIA
// Convert between a shared memory handle and a zx_handle_t to a VMO.
inline PlatformSharedMemoryHandle SharedMemoryHandleFromVMO(uint32_t handle) {
  return static_cast<PlatformSharedMemoryHandle>(handle);
}
inline uint32_t VMOFromSharedMemoryHandle(PlatformSharedMemoryHandle handle) {
  return static_cast<uint32_t>(handle);
}
#elif V8_OS_WIN
// Convert between a shared memory handle and a Windows HANDLE to a file mapping
// object.
inline PlatformSharedMemoryHandle SharedMemoryHandleFromFileMapping(
    void* handle) {
  return reinterpret_cast<PlatformSharedMemoryHandle>(handle);
}
inline void* FileMappingFromSharedMemoryHandle(
    PlatformSharedMemoryHandle handle) {
  return reinterpret_cast<void*>(handle);
}
#else
// Convert between a shared memory handle and a file descriptor.
inline PlatformSharedMemoryHandle SharedMemoryHandleFromFileDescriptor(int fd) {
  return static_cast<PlatformSharedMemoryHandle>(fd);
}
inline int FileDescriptorFromSharedMemoryHandle(
    PlatformSharedMemoryHandle handle) {
  return static_cast<int>(handle);
}
#endif
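
// A sketch (not part of the V8 API) of wrapping a POSIX file descriptor from a
// hypothetical shm_open() call into the opaque handle type (the non-Darwin,
// non-Fuchsia, non-Windows branch above). As noted above, the caller includes
// the OS headers itself (<sys/mman.h> and <fcntl.h> here).
//
//   int fd = shm_open("/v8_example", O_RDWR | O_CREAT, 0600);
//   v8::PlatformSharedMemoryHandle handle =
//       fd >= 0 ? v8::SharedMemoryHandleFromFileDescriptor(fd)
//               : v8::kInvalidSharedMemoryHandle;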

/**
 * Possible permissions for memory pages.
 */
enum class PagePermissions {
  kNoAccess,
  kRead,
  kReadWrite,
  kReadWriteExecute,
  kReadExecute,
};

/**
 * Class to manage a virtual memory address space.
 *
 * This class represents a contiguous region of virtual address space in which
 * sub-spaces and (private or shared) memory pages can be allocated, freed, and
 * modified. This interface is meant to eventually replace the PageAllocator
 * interface, and can be used as an alternative in the meantime.
 *
 * This API is not yet stable and may change without notice!
 */
class VirtualAddressSpace {
 public:
  using Address = uintptr_t;

  VirtualAddressSpace(size_t page_size, size_t allocation_granularity,
                      Address base, size_t size,
                      PagePermissions max_page_permissions)
      : page_size_(page_size),
        allocation_granularity_(allocation_granularity),
        base_(base),
        size_(size),
        max_page_permissions_(max_page_permissions) {}

  virtual ~VirtualAddressSpace() = default;

  /**
   * The page size used inside this space. Guaranteed to be a power of two.
   * Used as granularity for all page-related operations except for allocation,
   * which uses the allocation_granularity(), see below.
   *
   * \returns the page size in bytes.
   */
  size_t page_size() const { return page_size_; }

  /**
   * The granularity of page allocations and, by extension, of subspace
   * allocations. This is guaranteed to be a power of two and a multiple of the
   * page_size(). In practice, this is equal to the page size on most OSes, but
   * on Windows it is usually 64KB, while the page size is 4KB.
   *
   * \returns the allocation granularity in bytes.
   */
  size_t allocation_granularity() const { return allocation_granularity_; }

  /**
   * The base address of the address space managed by this instance.
   *
   * \returns the base address of this address space.
   */
  Address base() const { return base_; }

  /**
   * The size of the address space managed by this instance.
   *
   * \returns the size of this address space in bytes.
   */
  size_t size() const { return size_; }

  /**
   * The maximum page permissions that pages allocated inside this space can
   * obtain.
   *
   * \returns the maximum page permissions.
   */
  PagePermissions max_page_permissions() const { return max_page_permissions_; }

  /**
   * Whether the |address| is inside the address space managed by this instance.
   *
   * \returns true if it is inside the address space, false if not.
   */
  bool Contains(Address address) const {
    return (address >= base()) && (address < base() + size());
  }

  /**
   * Sets the random seed so that GetRandomPageAddress() will generate
   * repeatable sequences of random addresses.
   *
   * \param seed The seed for the PRNG.
   */
  virtual void SetRandomSeed(int64_t seed) = 0;

  /**
   * Returns a random address inside this address space, suitable for page
   * allocation hints.
   *
   * \returns a random address aligned to allocation_granularity().
   */
  virtual Address RandomPageAddress() = 0;

  /**
   * Allocates private memory pages with the given alignment and permissions.
   *
   * \param hint If nonzero, an attempt is made to place the allocation at the
   * given address first. If that fails, the allocation is placed elsewhere,
   * possibly nearby, but that is not guaranteed. Specifying zero for the hint
   * always causes this function to choose a random address. The hint, if
   * specified, must be aligned to the specified alignment.
   *
   * \param size The size of the allocation in bytes. Must be a multiple of the
   * allocation_granularity().
   *
   * \param alignment The alignment of the allocation in bytes. Must be a
   * multiple of the allocation_granularity() and should be a power of two.
   *
   * \param permissions The page permissions of the newly allocated pages.
   *
   * \returns the start address of the allocated pages on success, zero on
   * failure.
   */
  static constexpr Address kNoHint = 0;
  virtual V8_WARN_UNUSED_RESULT Address
  AllocatePages(Address hint, size_t size, size_t alignment,
                PagePermissions permissions) = 0;

  /**
   * Frees previously allocated pages.
   *
   * This function will terminate the process on failure as this implies a bug
   * in the client. As such, there is no return value.
   *
   * \param address The start address of the pages to free. This address must
   * have been obtained through a call to AllocatePages.
   *
   * \param size The size in bytes of the region to free. This must match the
   * size passed to AllocatePages when the pages were allocated.
   */
  virtual void FreePages(Address address, size_t size) = 0;

  /**
   * Sets permissions of all allocated pages in the given range.
   *
   * This operation can fail due to OOM, in which case false is returned. If
   * the operation fails for a reason other than OOM, this function will
   * terminate the process as this implies a bug in the client.
   *
   * \param address The start address of the range. Must be aligned to
   * page_size().
   *
   * \param size The size in bytes of the range. Must be a multiple
   * of page_size().
   *
   * \param permissions The new permissions for the range.
   *
   * \returns true on success, false on OOM.
   */
  virtual V8_WARN_UNUSED_RESULT bool SetPagePermissions(
      Address address, size_t size, PagePermissions permissions) = 0;

  /**
   * Creates a guard region at the specified address.
   *
   * Guard regions are guaranteed to cause a fault when accessed and generally
   * do not count towards any memory consumption limits. Further, allocating
   * guard regions usually cannot fail in subspaces if the region does not
   * overlap with another region, subspace, or page allocation.
   *
   * \param address The start address of the guard region. Must be aligned to
   * the allocation_granularity().
   *
   * \param size The size of the guard region in bytes. Must be a multiple of
   * the allocation_granularity().
   *
   * \returns true on success, false otherwise.
   */
  virtual V8_WARN_UNUSED_RESULT bool AllocateGuardRegion(Address address,
                                                         size_t size) = 0;

  /**
   * Frees an existing guard region.
   *
   * This function will terminate the process on failure as this implies a bug
   * in the client. As such, there is no return value.
   *
   * \param address The start address of the guard region to free. This address
   * must have previously been used as address parameter in a successful
   * invocation of AllocateGuardRegion.
   *
   * \param size The size in bytes of the guard region to free. This must match
   * the size passed to AllocateGuardRegion when the region was created.
   */
  virtual void FreeGuardRegion(Address address, size_t size) = 0;

  /**
   * Allocates shared memory pages with the given permissions.
   *
   * \param hint Placement hint. See AllocatePages.
   *
   * \param size The size of the allocation in bytes. Must be a multiple of the
   * allocation_granularity().
   *
   * \param permissions The page permissions of the newly allocated pages.
   *
   * \param handle A platform-specific handle to a shared memory object. See
   * the SharedMemoryHandleFromX routines above for ways to obtain these.
   *
   * \param offset The offset in the shared memory object at which the mapping
   * should start. Must be a multiple of the allocation_granularity().
   *
   * \returns the start address of the allocated pages on success, zero on
   * failure.
   */
  virtual V8_WARN_UNUSED_RESULT Address
  AllocateSharedPages(Address hint, size_t size, PagePermissions permissions,
                      PlatformSharedMemoryHandle handle, uint64_t offset) = 0;

  /**
   * Frees previously allocated shared pages.
   *
   * This function will terminate the process on failure as this implies a bug
   * in the client. As such, there is no return value.
   *
   * \param address The start address of the pages to free. This address must
   * have been obtained through a call to AllocateSharedPages.
   *
   * \param size The size in bytes of the region to free. This must match the
   * size passed to AllocateSharedPages when the pages were allocated.
   */
  virtual void FreeSharedPages(Address address, size_t size) = 0;

  /**
   * Whether this instance can allocate subspaces or not.
   *
   * \returns true if subspaces can be allocated, false if not.
   */
  virtual bool CanAllocateSubspaces() = 0;

  /**
   * Allocates a subspace.
   *
   * The address space of a subspace stays reserved in the parent space for the
   * lifetime of the subspace. As such, it is guaranteed that page allocations
   * on the parent space cannot end up inside a subspace.
   *
   * \param hint Hints where the subspace should be allocated. See
   * AllocatePages() for more details.
   *
   * \param size The size in bytes of the subspace. Must be a multiple of the
   * allocation_granularity().
   *
   * \param alignment The alignment of the subspace in bytes. Must be a multiple
   * of the allocation_granularity() and should be a power of two.
   *
   * \param max_page_permissions The maximum permissions that pages allocated in
   * the subspace can obtain.
   *
   * \returns a new subspace or nullptr on failure.
   */
  virtual std::unique_ptr<VirtualAddressSpace> AllocateSubspace(
      Address hint, size_t size, size_t alignment,
      PagePermissions max_page_permissions) = 0;

  //
  // TODO(v8) maybe refactor the methods below before stabilizing the API. For
  // example by combining them into some form of page operation method that
  // takes a command enum as parameter.
  //

  /**
   * Recommits discarded pages in the given range with given permissions.
   * Discarded pages must be recommitted with their original permissions
   * before they are used again.
   *
   * \param address The start address of the range. Must be aligned to
   * page_size().
   *
   * \param size The size in bytes of the range. Must be a multiple
   * of page_size().
   *
   * \param permissions The permissions for the range that the pages must have.
   *
   * \returns true on success, false otherwise.
   */
  virtual V8_WARN_UNUSED_RESULT bool RecommitPages(
      Address address, size_t size, PagePermissions permissions) = 0;

  /**
   * Frees memory in the given [address, address + size) range. address and
   * size should be aligned to the page_size(). The next write to this memory
   * area brings the memory transparently back. This should be treated as a
   * hint to the OS that the pages are no longer needed. It does not guarantee
   * that the pages will be discarded immediately or at all.
   *
   * \returns true on success, false otherwise. Since this method is only a
   * hint, a successful invocation does not imply that pages have been removed.
   */
  virtual V8_WARN_UNUSED_RESULT bool DiscardSystemPages(Address address,
                                                        size_t size) {
    return true;
  }

  /**
   * Decommits any wired memory pages in the given range, allowing the OS to
   * reclaim them, and marks the region as inaccessible (kNoAccess). The address
   * range stays reserved and can be accessed again later by changing its
   * permissions. However, in that case the memory content is guaranteed to be
   * zero-initialized again. The memory must have been previously allocated by a
   * call to AllocatePages.
   *
   * \returns true on success, false otherwise.
   */
  virtual V8_WARN_UNUSED_RESULT bool DecommitPages(Address address,
                                                   size_t size) = 0;

 private:
  const size_t page_size_;
  const size_t allocation_granularity_;
  const Address base_;
  const size_t size_;
  const PagePermissions max_page_permissions_;
};
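
// A sketch (not part of the V8 API) of a reserve-then-commit flow on a
// VirtualAddressSpace, assuming a hypothetical `space` instance. Allocation
// sizes use allocation_granularity(); permission changes use page_size().
//
//   size_t size = space->allocation_granularity();
//   v8::VirtualAddressSpace::Address region = space->AllocatePages(
//       v8::VirtualAddressSpace::kNoHint, size,
//       space->allocation_granularity(),
//       v8::PagePermissions::kNoAccess);  // Reserve without access.
//   if (region) {
//     if (space->SetPagePermissions(region, size,
//                                   v8::PagePermissions::kReadWrite)) {
//       // ... use the memory at `region` ...
//     }
//     space->FreePages(region, size);
//   }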

/**
 * V8 Allocator used for allocating zone backings.
 */
class ZoneBackingAllocator {
 public:
  using MallocFn = void* (*)(size_t);
  using FreeFn = void (*)(void*);

  virtual MallocFn GetMallocFn() const { return ::malloc; }
  virtual FreeFn GetFreeFn() const { return ::free; }
};

/**
 * Observer used by V8 to notify the embedder about entering/leaving sections
 * with high throughput of malloc/free operations.
 */
class HighAllocationThroughputObserver {
 public:
  virtual void EnterSection() {}
  virtual void LeaveSection() {}
};

/**
 * V8 Platform abstraction layer.
 *
 * The embedder has to provide an implementation of this interface before
 * initializing the rest of V8.
 */
class Platform {
 public:
  virtual ~Platform() = default;

  /**
   * Allows the embedder to manage memory page allocations.
   * Returning nullptr will cause V8 to use the default page allocator.
   */
  virtual PageAllocator* GetPageAllocator() = 0;

  /**
   * Allows the embedder to provide an allocator that uses per-thread memory
   * permissions to protect allocations.
   * Returning nullptr will cause V8 to disable protections that rely on this
   * feature.
   */
  virtual ThreadIsolatedAllocator* GetThreadIsolatedAllocator() {
    return nullptr;
  }

  /**
   * Allows the embedder to specify a custom allocator used for zones.
   */
  virtual ZoneBackingAllocator* GetZoneBackingAllocator() {
    static ZoneBackingAllocator default_allocator;
    return &default_allocator;
  }

  /**
   * Enables the embedder to respond in cases where V8 can't allocate large
   * blocks of memory. V8 retries the failed allocation once after calling this
   * method. On success, execution continues; otherwise V8 exits with a fatal
   * error.
   * Embedder overrides of this function must NOT call back into V8.
   */
  virtual void OnCriticalMemoryPressure() {}

  /**
   * Gets the max number of worker threads that may be used to execute
   * concurrent work scheduled for any single TaskPriority by
   * Call(BlockingTask)OnWorkerThread() or PostJob(). This can be used to
   * estimate the number of tasks a work package should be split into. A return
   * value of 0 means that there are no worker threads available. Note that a
   * value of 0 won't prohibit V8 from posting tasks using |CallOnWorkerThread|.
   */
  virtual int NumberOfWorkerThreads() = 0;

  /**
   * Returns a TaskRunner which can be used to post a task on the foreground
   * thread. The TaskRunner's NonNestableTasksEnabled() must be true. This
   * function should only be called from a foreground thread.
   * TODO(chromium:1448758): Deprecate once |GetForegroundTaskRunner(Isolate*,
   * TaskPriority)| is ready.
   */
  virtual std::shared_ptr<v8::TaskRunner> GetForegroundTaskRunner(
      Isolate* isolate) {
    return GetForegroundTaskRunner(isolate, TaskPriority::kUserBlocking);
  }

  /**
   * Returns a TaskRunner with a specific |priority| which can be used to post a
   * task on the foreground thread. The TaskRunner's NonNestableTasksEnabled()
   * must be true. This function should only be called from a foreground thread.
   * TODO(chromium:1448758): Make pure virtual once embedders implement it.
   */
  virtual std::shared_ptr<v8::TaskRunner> GetForegroundTaskRunner(
      Isolate* isolate, TaskPriority priority) {
    return nullptr;
  }

  /**
   * Schedules a task to be invoked on a worker thread.
   * Embedders should override PostTaskOnWorkerThreadImpl() instead of
   * CallOnWorkerThread().
   */
  void CallOnWorkerThread(
      std::unique_ptr<Task> task,
      const SourceLocation& location = SourceLocation::Current()) {
    PostTaskOnWorkerThreadImpl(TaskPriority::kUserVisible, std::move(task),
                               location);
  }

  /**
   * Schedules a task that blocks the main thread to be invoked with high
   * priority on a worker thread.
   * Embedders should override PostTaskOnWorkerThreadImpl() instead of
   * CallBlockingTaskOnWorkerThread().
   */
  void CallBlockingTaskOnWorkerThread(
      std::unique_ptr<Task> task,
      const SourceLocation& location = SourceLocation::Current()) {
    // Embedders may optionally override this to process these tasks in a high
    // priority pool.
    PostTaskOnWorkerThreadImpl(TaskPriority::kUserBlocking, std::move(task),
                               location);
  }

  /**
   * Schedules a task to be invoked with low priority on a worker thread.
   * Embedders should override PostTaskOnWorkerThreadImpl() instead of
   * CallLowPriorityTaskOnWorkerThread().
   */
  void CallLowPriorityTaskOnWorkerThread(
      std::unique_ptr<Task> task,
      const SourceLocation& location = SourceLocation::Current()) {
    // Embedders may optionally override this to process these tasks in a low
    // priority pool.
    PostTaskOnWorkerThreadImpl(TaskPriority::kBestEffort, std::move(task),
                               location);
  }

  /**
   * Schedules a task to be invoked on a worker thread after |delay_in_seconds|
   * expires.
   * Embedders should override PostDelayedTaskOnWorkerThreadImpl() instead of
   * CallDelayedOnWorkerThread().
   */
  void CallDelayedOnWorkerThread(
      std::unique_ptr<Task> task, double delay_in_seconds,
      const SourceLocation& location = SourceLocation::Current()) {
    PostDelayedTaskOnWorkerThreadImpl(TaskPriority::kUserVisible,
                                      std::move(task), delay_in_seconds,
                                      location);
  }

  /**
   * Returns true if idle tasks are enabled for the given |isolate|.
   */
  virtual bool IdleTasksEnabled(Isolate* isolate) { return false; }

  /**
   * Posts |job_task| to run in parallel. Returns a JobHandle associated with
   * the Job, which can be joined or canceled.
   * This avoids degenerate cases:
   * - Calling CallOnWorkerThread() for each work item, causing significant
   *   overhead.
   * - Fixed number of CallOnWorkerThread() calls that split the work and might
   *   run for a long time. This is problematic when many components post
   *   "num cores" tasks and all expect to use all the cores. In these cases,
   *   the scheduler lacks context to be fair to multiple same-priority requests
   *   and/or ability to request lower priority work to yield when high priority
   *   work comes in.
   * A canonical implementation of |job_task| looks like:
   * class MyJobTask : public JobTask {
   *  public:
   *   MyJobTask(...) : worker_queue_(...) {}
   *   // JobTask:
   *   void Run(JobDelegate* delegate) override {
   *     while (!delegate->ShouldYield()) {
   *       // Smallest unit of work.
   *       auto work_item = worker_queue_.TakeWorkItem(); // Thread safe.
   *       if (!work_item) return;
   *       ProcessWork(work_item);
   *     }
   *   }
   *
   *   size_t GetMaxConcurrency(size_t worker_count) const override {
   *     return worker_queue_.GetSize(); // Thread safe.
   *   }
   * };
   * auto handle = PostJob(TaskPriority::kUserVisible,
   *                       std::make_unique<MyJobTask>(...));
   * handle->Join();
   *
   * PostJob() and methods of the returned JobHandle/JobDelegate must never be
   * called while holding a lock that could be acquired by JobTask::Run or
   * JobTask::GetMaxConcurrency -- that could result in a deadlock. This is
   * because [1] JobTask::GetMaxConcurrency may be invoked while holding
   * internal lock (A), hence JobTask::GetMaxConcurrency can only use a lock (B)
   * if that lock is *never* held while calling back into JobHandle from any
   * thread (A=>B/B=>A deadlock) and [2] JobTask::Run or
   * JobTask::GetMaxConcurrency may be invoked synchronously from JobHandle
   * (B=>JobHandle::foo=>B deadlock).
   * Embedders should override CreateJobImpl() instead of PostJob().
   */
  std::unique_ptr<JobHandle> PostJob(
      TaskPriority priority, std::unique_ptr<JobTask> job_task,
      const SourceLocation& location = SourceLocation::Current()) {
    auto handle = CreateJob(priority, std::move(job_task), location);
    handle->NotifyConcurrencyIncrease();
    return handle;
  }

  /**
   * Creates and returns a JobHandle associated with a Job. Unlike PostJob(),
   * this doesn't immediately schedule |worker_task| to run; the Job is then
   * scheduled by calling either NotifyConcurrencyIncrease() or Join().
   *
   * A sufficient CreateJobImpl() implementation that uses the default Job
   * provided in libplatform looks like:
   *  std::unique_ptr<JobHandle> CreateJobImpl(
   *      TaskPriority priority, std::unique_ptr<JobTask> job_task,
   *      const SourceLocation& location) override {
   *    return v8::platform::NewDefaultJobHandle(
   *        this, priority, std::move(job_task), NumberOfWorkerThreads());
   *  }
   *
   * Embedders should override CreateJobImpl() instead of CreateJob().
   */
  std::unique_ptr<JobHandle> CreateJob(
      TaskPriority priority, std::unique_ptr<JobTask> job_task,
      const SourceLocation& location = SourceLocation::Current()) {
    return CreateJobImpl(priority, std::move(job_task), location);
  }

  /**
   * Instantiates a ScopedBlockingCall to annotate a scope that may/will block.
   */
  virtual std::unique_ptr<ScopedBlockingCall> CreateBlockingScope(
      BlockingType blocking_type) {
    return nullptr;
  }

  /**
   * Monotonically increasing time in seconds from an arbitrary fixed point in
   * the past. This function is expected to return at least
   * millisecond-precision values. For this reason, it is recommended that the
   * fixed point be no further in the past than the epoch.
   */
  virtual double MonotonicallyIncreasingTime() = 0;

  /**
   * Current wall-clock time in milliseconds since epoch. Use
   * CurrentClockTimeMillisecondsHighResolution() when higher precision is
   * required.
   */
  virtual int64_t CurrentClockTimeMilliseconds() {
    return static_cast<int64_t>(floor(CurrentClockTimeMillis()));
  }

  /**
   * This function is deprecated and will be deleted. Use either
   * CurrentClockTimeMilliseconds() or
   * CurrentClockTimeMillisecondsHighResolution().
   */
  virtual double CurrentClockTimeMillis() = 0;

  /**
   * Same as CurrentClockTimeMilliseconds(), but with more precision.
   */
  virtual double CurrentClockTimeMillisecondsHighResolution() {
    return CurrentClockTimeMillis();
  }
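
  // A sketch (not part of the V8 API) of implementing the two clock methods
  // above with the C++ standard library (<chrono> assumed); this mirrors what
  // a simple embedder Platform might do.
  //
  //   double MonotonicallyIncreasingTime() override {
  //     return std::chrono::duration<double>(
  //                std::chrono::steady_clock::now().time_since_epoch())
  //         .count();
  //   }
  //   double CurrentClockTimeMillis() override {
  //     return SystemClockTimeMillis();  // Default wall clock, see below.
  //   }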

  typedef void (*StackTracePrinter)();

  /**
   * Returns a function pointer that prints a stack trace of the current stack
   * on invocation. Returning nullptr disables stack trace printing.
   */
  virtual StackTracePrinter GetStackTracePrinter() { return nullptr; }

  /**
   * Returns an instance of a v8::TracingController. This must be non-nullptr.
   */
  virtual TracingController* GetTracingController() = 0;

  /**
   * Tells the embedder to generate and upload a crashdump during an unexpected
   * but non-critical scenario.
   */
  virtual void DumpWithoutCrashing() {}

  /**
   * Allows the embedder to observe sections with high throughput allocation
   * operations.
   */
  virtual HighAllocationThroughputObserver*
  GetHighAllocationThroughputObserver() {
    static HighAllocationThroughputObserver default_observer;
    return &default_observer;
  }

 protected:
  /**
   * Default implementation of current wall-clock time in milliseconds
   * since epoch. Useful for implementing |CurrentClockTimeMillis| if
   * nothing special is needed.
   */
  V8_EXPORT static double SystemClockTimeMillis();

  /**
   * Creates and returns a JobHandle associated with a Job.
   */
  virtual std::unique_ptr<JobHandle> CreateJobImpl(
      TaskPriority priority, std::unique_ptr<JobTask> job_task,
      const SourceLocation& location) = 0;

  /**
   * Schedules a task with |priority| to be invoked on a worker thread.
   */
  virtual void PostTaskOnWorkerThreadImpl(TaskPriority priority,
                                          std::unique_ptr<Task> task,
                                          const SourceLocation& location) = 0;

  /**
   * Schedules a task with |priority| to be invoked on a worker thread after
   * |delay_in_seconds| expires.
   */
  virtual void PostDelayedTaskOnWorkerThreadImpl(
      TaskPriority priority, std::unique_ptr<Task> task,
      double delay_in_seconds, const SourceLocation& location) = 0;
};
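
// A partial sketch (not part of the V8 API) of a minimal single-threaded
// embedder Platform. It runs "worker" tasks inline and ignores delays, which
// is only acceptable for experimentation; a real embedder would use a thread
// pool (or v8::platform::NewDefaultPlatform() from libplatform).
// v8::platform::NewDefaultJobHandle comes from <libplatform/libplatform.h>;
// `runner_` is a hypothetical TaskRunner implementation whose setup is
// omitted here.
//
//   class MinimalPlatform final : public v8::Platform {
//    public:
//     v8::PageAllocator* GetPageAllocator() override { return nullptr; }
//     int NumberOfWorkerThreads() override { return 0; }
//     std::shared_ptr<v8::TaskRunner> GetForegroundTaskRunner(
//         v8::Isolate*, v8::TaskPriority) override { return runner_; }
//     double MonotonicallyIncreasingTime() override {
//       return std::chrono::duration<double>(
//           std::chrono::steady_clock::now().time_since_epoch()).count();
//     }
//     double CurrentClockTimeMillis() override {
//       return SystemClockTimeMillis();
//     }
//     v8::TracingController* GetTracingController() override {
//       return &tracing_;
//     }
//
//    private:
//     std::unique_ptr<v8::JobHandle> CreateJobImpl(
//         v8::TaskPriority priority, std::unique_ptr<v8::JobTask> job_task,
//         const v8::SourceLocation&) override {
//       return v8::platform::NewDefaultJobHandle(
//           this, priority, std::move(job_task), NumberOfWorkerThreads());
//     }
//     void PostTaskOnWorkerThreadImpl(v8::TaskPriority,
//                                     std::unique_ptr<v8::Task> task,
//                                     const v8::SourceLocation&) override {
//       task->Run();  // Inline; no worker threads in this sketch.
//     }
//     void PostDelayedTaskOnWorkerThreadImpl(
//         v8::TaskPriority, std::unique_ptr<v8::Task> task, double,
//         const v8::SourceLocation&) override {
//       task->Run();  // Ignores the delay in this sketch.
//     }
//     v8::TracingController tracing_;
//     std::shared_ptr<v8::TaskRunner> runner_;
//   };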

}  // namespace v8

#endif  // V8_V8_PLATFORM_H_