v8  7.8.279 (node 12.19.1)
V8 is Google's open source JavaScript engine
v8-profiler.h
Go to the documentation of this file.
1 // Copyright 2010 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #ifndef V8_V8_PROFILER_H_
6 #define V8_V8_PROFILER_H_
7 
8 #include <limits.h>
9 #include <unordered_set>
10 #include <vector>
11 #include "v8.h" // NOLINT(build/include)
12 
13 /**
14  * Profiler support for the V8 JavaScript engine.
15  */
16 namespace v8 {
17 
18 class HeapGraphNode;
19 struct HeapStatsUpdate;
20 
21 using NativeObject = void*;
22 using SnapshotObjectId = uint32_t;
23 
25  int script_id;
26  size_t position;
27 };
28 
29 namespace internal {
30 class CpuProfile;
31 } // namespace internal
32 
33 } // namespace v8
34 
35 #ifdef V8_OS_WIN
// Explicit instantiation with export markup — presumably so the vector's
// code is emitted into (and exported from) the V8 DLL on Windows, the usual
// MSVC pattern for STL members of exported types. TODO(review): confirm.
36 template class V8_EXPORT std::vector<v8::CpuProfileDeoptFrame>;
37 #endif
38 
39 namespace v8 {
40 
42  /** A pointer to a static string owned by v8. */
43  const char* deopt_reason;
45 };
46 
47 } // namespace v8
48 
49 #ifdef V8_OS_WIN
// Explicit instantiation with export markup — presumably so the vector's
// code is emitted into (and exported from) the V8 DLL on Windows, the usual
// MSVC pattern for STL members of exported types. TODO(review): confirm.
50 template class V8_EXPORT std::vector<v8::CpuProfileDeoptInfo>;
51 #endif
52 
53 namespace v8 {
54 
55 /**
56  * CpuProfileNode represents a node in a call graph.
57  */
// NOTE(review): the `class V8_EXPORT CpuProfileNode {` opener was dropped by
// the extraction (rendered line 58 is missing) — confirm upstream.
59  public:
60  struct LineTick {
61  /** The 1-based number of the source line where the function originates. */
62  int line;
63 
64  /** The count of samples associated with the source line. */
65  unsigned int hit_count;
66  };
67 
68  // An annotation hinting at the source of a CpuProfileNode.
69  enum SourceType {
70  // User-supplied script with associated resource information.
71  kScript = 0,
72  // Native scripts and provided builtins.
73  kBuiltin = 1,
74  // Callbacks into native code.
75  kCallback = 2,
76  // VM-internal functions or state.
77  kInternal = 3,
78  // A node that failed to symbolize.
// NOTE(review): the enumerator documented above (value 4) is missing from
// this excerpt.
80  };
81 
82  /** Returns function name (empty string for anonymous functions.) */
// NOTE(review): the declaration documented above is missing from this
// excerpt (presumably a handle-returning counterpart of
// GetFunctionNameStr below) — confirm upstream.
84 
85  /**
86  * Returns function name (empty string for anonymous functions.)
87  * The string ownership is *not* passed to the caller. It stays valid until
88  * profile is deleted. The function is thread safe.
89  */
90  const char* GetFunctionNameStr() const;
91 
92  /** Returns id of the script where function is located. */
93  int GetScriptId() const;
94 
95  /** Returns resource name for script from where the function originates. */
// NOTE(review): the declaration documented above is missing from this
// excerpt.
97 
98  /**
99  * Returns resource name for script from where the function originates.
100  * The string ownership is *not* passed to the caller. It stays valid until
101  * profile is deleted. The function is thread safe.
102  */
103  const char* GetScriptResourceNameStr() const;
104 
105  /**
106  * Return true if the script from where the function originates is flagged as
107  * being shared cross-origin.
108  */
// NOTE(review): the declaration documented above (a bool accessor) is
// missing from this excerpt.
110 
111  /**
112  * Returns the number, 1-based, of the line where the function originates.
113  * kNoLineNumberInfo if no line number information is available.
114  */
115  int GetLineNumber() const;
116 
117  /**
118  * Returns 1-based number of the column where the function originates.
119  * kNoColumnNumberInfo if no column number information is available.
120  */
121  int GetColumnNumber() const;
122 
123  /**
124  * Returns the number of the function's source lines that collect the samples.
125  */
126  unsigned int GetHitLineCount() const;
127 
128  /** Returns the set of source lines that collect the samples.
129  * The caller allocates buffer and responsible for releasing it.
130  * True if all available entries are copied, otherwise false.
131  * The function copies nothing if buffer is not large enough.
132  */
133  bool GetLineTicks(LineTick* entries, unsigned int length) const;
134 
135  /** Returns bailout reason for the function
136  * if the optimization was disabled for it.
137  */
138  const char* GetBailoutReason() const;
139 
140  /**
141  * Returns the count of samples where the function was currently executing.
142  */
143  unsigned GetHitCount() const;
144 
145  /** Returns function entry UID. */
// NOTE(review): the V8_DEPRECATED(...) opener of the deprecated GetCallUid
// declaration below is missing from this excerpt.
147  "Use GetScriptId, GetLineNumber, and GetColumnNumber instead.",
148  unsigned GetCallUid() const);
149 
150  /** Returns id of the node. The id is unique within the tree */
151  unsigned GetNodeId() const;
152 
153  /**
154  * Gets the type of the source which the node was captured from.
155  */
// NOTE(review): the declaration documented above (a SourceType accessor) is
// missing from this excerpt.
157 
158  /** Returns child nodes count of the node. */
159  int GetChildrenCount() const;
160 
161  /** Retrieves a child node by index. */
162  const CpuProfileNode* GetChild(int index) const;
163 
164  /** Retrieves the ancestor node, or null if the root. */
165  const CpuProfileNode* GetParent() const;
166 
167  /** Retrieves deopt infos for the node. */
168  const std::vector<CpuProfileDeoptInfo>& GetDeoptInfos() const;
169 
// NOTE(review): the kNoLineNumberInfo / kNoColumnNumberInfo constants
// referenced by GetLineNumber/GetColumnNumber above appear to have been
// dropped here (rendered lines 170-171) — confirm upstream.
172 };
173 
174 
175 /**
176  * CpuProfile contains a CPU profile in a form of top-down call tree
177  * (from main() down to functions that do all the work).
178  */
// NOTE(review): the `class V8_EXPORT CpuProfile {` opener was dropped by the
// extraction — confirm upstream.
180  public:
181  /** Returns CPU profile title. */
// NOTE(review): the title accessor documented above is missing from this
// excerpt.
183 
184  /** Returns the root node of the top down call tree. */
// NOTE(review): the root accessor documented above is missing from this
// excerpt.
186 
187  /**
188  * Returns number of samples recorded. The samples are not recorded unless
189  * |record_samples| parameter of CpuProfiler::StartCpuProfiling is true.
190  */
191  int GetSamplesCount() const;
192 
193  /**
194  * Returns profile node corresponding to the top frame the sample at
195  * the given index.
196  */
197  const CpuProfileNode* GetSample(int index) const;
198 
199  /**
200  * Returns the timestamp of the sample. The timestamp is the number of
201  * microseconds since some unspecified starting point.
202  * The point is equal to the starting point used by GetStartTime.
203  */
204  int64_t GetSampleTimestamp(int index) const;
205 
206  /**
207  * Returns time when the profile recording was started (in microseconds)
208  * since some unspecified starting point.
209  */
210  int64_t GetStartTime() const;
211 
212  /**
213  * Returns time when the profile recording was stopped (in microseconds)
214  * since some unspecified starting point.
215  * The point is equal to the starting point used by GetStartTime.
216  */
217  int64_t GetEndTime() const;
218 
219  /**
220  * Deletes the profile and removes it from CpuProfiler's list.
221  * All pointers to nodes previously returned become invalid.
222  */
223  void Delete();
224 };
225 
// NOTE(review): this fragment is the body of an enum (CpuProfilingMode — the
// enumerator kLeafNodeLineNumbers is referenced in CpuProfiler's docs below);
// the enum opener and both enumerator lines were dropped by the extraction.
227  // In the resulting CpuProfile tree, intermediate nodes in a stack trace
228  // (from the root to a leaf) will have line numbers that point to the start
229  // line of the function, rather than the line of the callsite of the child.
// NOTE(review): the first enumerator (kLeafNodeLineNumbers, per the
// reference at rendered line 362) is missing here.
231  // In the resulting CpuProfile tree, nodes are separated based on the line
232  // number of their callsite in their parent.
// NOTE(review): the second enumerator is missing here.
234 };
235 
236 // Determines how names are derived for functions sampled.
// NOTE(review): the enum opener and both enumerator lines of this
// naming-mode enum were dropped by the extraction; only the enumerator
// comments survived.
238  // Use the immediate name of functions at compilation time.
240  // Use more verbose naming for functions without names, inferred from scope
241  // where possible.
243 };
244 
// NOTE(review): the enum opener and both enumerator lines of this
// logging-mode enum were dropped by the extraction; only the enumerator
// comments survived.
246  // Enables logging when a profile is active, and disables logging when all
247  // profiles are detached.
249  // Enables logging for the lifetime of the CpuProfiler. Calls to
250  // StartRecording are faster, at the expense of runtime overhead.
252 };
253 
254 /**
255  * Optional profiling attributes.
256  */
// NOTE(review): the `class V8_EXPORT CpuProfilingOptions {` opener was
// dropped by the extraction — confirm upstream.
258  public:
259  // Indicates that the sample buffer size should not be explicitly limited.
260  static const unsigned kNoSampleLimit = UINT_MAX;
261 
262  /**
263  * \param mode Type of computation of stack frame line numbers.
264  * \param max_samples The maximum number of samples that should be recorded by
265  * the profiler. Samples obtained after this limit will be
266  * discarded.
267  * \param sampling_interval_us controls the profile-specific target
268  * sampling interval. The provided sampling
269  * interval will be snapped to the next lowest
270  * non-zero multiple of the profiler's sampling
271  * interval, set via SetSamplingInterval(). If
272  * zero, the sampling interval will be equal to
273  * the profiler's sampling interval.
274  */
// NOTE(review): the constructor opener (name and leading parameter, rendered
// lines 275-276) is missing; only the trailing parameters below survived.
277  unsigned max_samples = kNoSampleLimit, int sampling_interval_us = 0,
278  MaybeLocal<Context> filter_context = MaybeLocal<Context>());
279 
280  CpuProfilingMode mode() const { return mode_; }
281  unsigned max_samples() const { return max_samples_; }
282  int sampling_interval_us() const { return sampling_interval_us_; }
283 
284  private:
285  friend class internal::CpuProfile;
286 
287  bool has_filter_context() const { return !filter_context_.IsEmpty(); }
288  void* raw_filter_context() const;
289 
290  CpuProfilingMode mode_;
291  unsigned max_samples_;
292  int sampling_interval_us_;
293  CopyablePersistentTraits<Context>::CopyablePersistent filter_context_;
294 };
295 
296 /**
297  * Interface for controlling CPU profiling. Instance of the
298  * profiler can be created using v8::CpuProfiler::New method.
299  */
// NOTE(review): the `class V8_EXPORT CpuProfiler {` opener was dropped by
// the extraction — confirm upstream.
301  public:
302  /**
303  * Creates a new CPU profiler for the |isolate|. The isolate must be
304  * initialized. The profiler object must be disposed after use by calling
305  * |Dispose| method.
306  */
307  static CpuProfiler* New(Isolate* isolate,
// NOTE(review): the remaining parameters of New() (rendered lines 308-309)
// are missing from this excerpt.
310 
311  /**
312  * Synchronously collect current stack sample in all profilers attached to
313  * the |isolate|. The call does not affect number of ticks recorded for
314  * the current top node.
315  */
316  static void CollectSample(Isolate* isolate);
317 
318  /**
319  * Disposes the CPU profiler object.
320  */
321  void Dispose();
322 
323  /**
324  * Changes default CPU profiler sampling interval to the specified number
325  * of microseconds. Default interval is 1000us. This method must be called
326  * when there are no profiles being recorded.
327  */
328  void SetSamplingInterval(int us);
329 
330  /**
331  * Sets whether or not the profiler should prioritize consistency of sample
332  * periodicity on Windows. Disabling this can greatly reduce CPU usage, but
333  * may result in greater variance in sample timings from the platform's
334  * scheduler. Defaults to enabled. This method must be called when there are
335  * no profiles being recorded.
336  */
// NOTE(review): the declaration documented above is missing from this
// excerpt.
338 
339  /**
340  * Starts collecting a CPU profile. Title may be an empty string. Several
341  * profiles may be collected at once. Attempts to start collecting several
342  * profiles with the same title are silently ignored.
343  */
// NOTE(review): the StartProfiling overload documented above is missing from
// this excerpt.
345 
346  /**
347  * Starts profiling with the same semantics as above, except with expanded
348  * parameters.
349  *
350  * |record_samples| parameter controls whether individual samples should
351  * be recorded in addition to the aggregated tree.
352  *
353  * |max_samples| controls the maximum number of samples that should be
354  * recorded by the profiler. Samples obtained after this limit will be
355  * discarded.
356  */
// NOTE(review): the opener of this StartProfiling overload (rendered line
// 357) is missing; only its parameter lines survived.
358  Local<String> title, CpuProfilingMode mode, bool record_samples = false,
359  unsigned max_samples = CpuProfilingOptions::kNoSampleLimit);
360  /**
361  * The same as StartProfiling above, but the CpuProfilingMode defaults to
362  * kLeafNodeLineNumbers mode, which was the previous default behavior of the
363  * profiler.
364  */
365  void StartProfiling(Local<String> title, bool record_samples = false);
366 
367  /**
368  * Stops collecting CPU profile with a given title and returns it.
369  * If the title given is empty, finishes the last profile started.
370  */
// NOTE(review): the StopProfiling declaration documented above is missing
// from this excerpt.
372 
373  /**
374  * Force collection of a sample. Must be called on the VM thread.
375  * Recording the forced sample does not contribute to the aggregated
376  * profile statistics.
377  */
378  V8_DEPRECATED("Use static CollectSample(Isolate*) instead.",
379  void CollectSample());
380 
381  /**
382  * Tells the profiler whether the embedder is idle.
383  */
384  V8_DEPRECATED("Use Isolate::SetIdle(bool) instead.",
385  void SetIdle(bool is_idle));
386 
387  /**
388  * Generate more detailed source positions to code objects. This results in
389  * better results when mapping profiling samples to script source.
390  */
// NOTE(review): the declaration documented above is missing from this
// excerpt.
392 
393  private:
394  CpuProfiler();
395  ~CpuProfiler();
396  CpuProfiler(const CpuProfiler&);
397  CpuProfiler& operator=(const CpuProfiler&);
398 };
399 
400 /**
401  * HeapSnapshotEdge represents a directed connection between heap
402  * graph nodes: from retainers to retained nodes.
403  */
// NOTE(review): the `class V8_EXPORT HeapGraphEdge {` opener was dropped by
// the extraction (the comment below refers to HeapGraphEdge::Type).
405  public:
406  enum Type {
407  kContextVariable = 0, // A variable from a function context.
408  kElement = 1, // An element of an array.
409  kProperty = 2, // A named object property.
410  kInternal = 3, // A link that can't be accessed from JS,
411  // thus, its name isn't a real property name
412  // (e.g. parts of a ConsString).
413  kHidden = 4, // A link that is needed for proper sizes
414  // calculation, but may be hidden from user.
415  kShortcut = 5, // A link that must not be followed during
416  // sizes calculation.
417  kWeak = 6 // A weak reference (ignored by the GC).
418  };
419 
420  /** Returns edge type (see HeapGraphEdge::Type). */
421  Type GetType() const;
422 
423  /**
424  * Returns edge name. This can be a variable name, an element index, or
425  * a property name.
426  */
427  Local<Value> GetName() const;
428 
429  /** Returns origin node. */
430  const HeapGraphNode* GetFromNode() const;
431 
432  /** Returns destination node. */
433  const HeapGraphNode* GetToNode() const;
434 };
435 
436 
437 /**
438  * HeapGraphNode represents a node in a heap graph.
439  */
// NOTE(review): the `class V8_EXPORT HeapGraphNode {` opener was dropped by
// the extraction — confirm upstream.
441  public:
442  enum Type {
443  kHidden = 0, // Hidden node, may be filtered when shown to user.
444  kArray = 1, // An array of elements.
445  kString = 2, // A string.
446  kObject = 3, // A JS object (except for arrays and strings).
447  kCode = 4, // Compiled code.
448  kClosure = 5, // Function closure.
449  kRegExp = 6, // RegExp.
450  kHeapNumber = 7, // Number stored in the heap.
451  kNative = 8, // Native object (not from V8 heap).
452  kSynthetic = 9, // Synthetic object, usually used for grouping
453  // snapshot items together.
454  kConsString = 10, // Concatenated string. A pair of pointers to strings.
455  kSlicedString = 11, // Sliced string. A fragment of another string.
456  kSymbol = 12, // A Symbol (ES6).
457  kBigInt = 13 // BigInt.
458  };
459 
460  /** Returns node type (see HeapGraphNode::Type). */
461  Type GetType() const;
462 
463  /**
464  * Returns node name. Depending on node's type this can be the name
465  * of the constructor (for objects), the name of the function (for
466  * closures), string value, or an empty string (for compiled code).
467  */
468  Local<String> GetName() const;
469 
470  /**
471  * Returns node id. For the same heap object, the id remains the same
472  * across all snapshots.
473  */
474  SnapshotObjectId GetId() const;
475 
476  /** Returns node's own size, in bytes. */
477  size_t GetShallowSize() const;
478 
479  /** Returns child nodes count of the node. */
480  int GetChildrenCount() const;
481 
482  /** Retrieves a child by index. */
483  const HeapGraphEdge* GetChild(int index) const;
484 };
485 
486 
487 /**
488  * An interface for exporting data from V8, using "push" model.
489  */
490 class V8_EXPORT OutputStream { // NOLINT
491  public:
492  enum WriteResult {
494  kAbort = 1
495  };
496  virtual ~OutputStream() = default;
497  /** Notify about the end of stream. */
498  virtual void EndOfStream() = 0;
499  /** Get preferred output chunk size. Called only once. */
500  virtual int GetChunkSize() { return 1024; }
501  /**
502  * Writes the next chunk of snapshot data into the stream. Writing
503  * can be stopped by returning kAbort as function result. EndOfStream
504  * will not be called in case writing was aborted.
505  */
506  virtual WriteResult WriteAsciiChunk(char* data, int size) = 0;
507  /**
508  * Writes the next chunk of heap stats data into the stream. Writing
509  * can be stopped by returning kAbort as function result. EndOfStream
510  * will not be called in case writing was aborted.
511  */
512  virtual WriteResult WriteHeapStatsChunk(HeapStatsUpdate* data, int count) {
513  return kAbort;
514  }
515 };
516 
517 
518 /**
519  * HeapSnapshots record the state of the JS heap at some moment.
520  */
// NOTE(review): the `class V8_EXPORT HeapSnapshot {` opener and the
// `enum SerializationFormat {` opener (the type is used by Serialize below)
// were dropped by the extraction — confirm upstream.
522  public:
524  kJSON = 0 // See format description near 'Serialize' method.
525  };
526 
527  /** Returns the root node of the heap graph. */
528  const HeapGraphNode* GetRoot() const;
529 
530  /** Returns a node by its id. */
531  const HeapGraphNode* GetNodeById(SnapshotObjectId id) const;
532 
533  /** Returns total nodes count in the snapshot. */
534  int GetNodesCount() const;
535 
536  /** Returns a node by index. */
537  const HeapGraphNode* GetNode(int index) const;
538 
539  /** Returns a max seen JS object Id. */
540  SnapshotObjectId GetMaxSnapshotJSObjectId() const;
541 
542  /**
543  * Deletes the snapshot and removes it from HeapProfiler's list.
544  * All pointers to nodes, edges and paths previously returned become
545  * invalid.
546  */
547  void Delete();
548 
549  /**
550  * Prepare a serialized representation of the snapshot. The result
551  * is written into the stream provided in chunks of specified size.
552  * The total length of the serialized snapshot is unknown in
553  * advance, it can be roughly equal to JS heap size (that means,
554  * it can be really big - tens of megabytes).
555  *
556  * For the JSON format, heap contents are represented as an object
557  * with the following structure:
558  *
559  * {
560  * snapshot: {
561  * title: "...",
562  * uid: nnn,
563  * meta: { meta-info },
564  * node_count: nnn,
565  * edge_count: nnn
566  * },
567  * nodes: [nodes array],
568  * edges: [edges array],
569  * strings: [strings array]
570  * }
571  *
572  * Nodes reference strings, other nodes, and edges by their indexes
573  * in corresponding arrays.
574  */
575  void Serialize(OutputStream* stream,
576  SerializationFormat format = kJSON) const;
577 };
578 
579 
580 /**
581  * An interface for reporting progress and controlling long-running
582  * activities.
583  */
584 class V8_EXPORT ActivityControl { // NOLINT
585  public:
588  kAbort = 1
589  };
590  virtual ~ActivityControl() = default;
591  /**
592  * Notify about current progress. The activity can be stopped by
593  * returning kAbort as the callback result.
594  */
595  virtual ControlOption ReportProgressValue(int done, int total) = 0;
596 };
597 
598 
599 /**
600  * AllocationProfile is a sampled profile of allocations done by the program.
601  * This is structured as a call-graph.
602  */
// NOTE(review): the `class V8_EXPORT AllocationProfile {` opener was dropped
// by the extraction — confirm upstream.
604  public:
605  struct Allocation {
606  /**
607  * Size of the sampled allocation object.
608  */
609  size_t size;
610 
611  /**
612  * The number of objects of such size that were sampled.
613  */
614  unsigned int count;
615  };
616 
617  /**
618  * Represents a node in the call-graph.
619  */
620  struct Node {
// NOTE(review): the field declarations belonging to the first six comments
// below (function name, script name, script id, start position, line number
// and column number) were dropped by the extraction; only the doc comments
// remain — confirm upstream.
621  /**
622  * Name of the function. May be empty for anonymous functions or if the
623  * script corresponding to this function has been unloaded.
624  */
626 
627  /**
628  * Name of the script containing the function. May be empty if the script
629  * name is not available, or if the script has been unloaded.
630  */
632 
633  /**
634  * id of the script where the function is located. May be equal to
635  * v8::UnboundScript::kNoScriptId in cases where the script doesn't exist.
636  */
638 
639  /**
640  * Start position of the function in the script.
641  */
643 
644  /**
645  * 1-indexed line number where the function starts. May be
646  * kNoLineNumberInfo if no line number information is available.
647  */
649 
650  /**
651  * 1-indexed column number where the function starts. May be
652  * kNoColumnNumberInfo if no line number information is available.
653  */
655 
656  /**
657  * Unique id of the node.
658  */
659  uint32_t node_id;
660 
661  /**
662  * List of callees called from this node for which we have sampled
663  * allocations. The lifetime of the children is scoped to the containing
664  * AllocationProfile.
665  */
666  std::vector<Node*> children;
667 
668  /**
669  * List of self allocations done by this node in the call-graph.
670  */
671  std::vector<Allocation> allocations;
672  };
673 
674  /**
675  * Represent a single sample recorded for an allocation.
676  */
677  struct Sample {
678  /**
679  * id of the node in the profile tree.
680  */
681  uint32_t node_id;
682 
683  /**
684  * Size of the sampled allocation object.
685  */
686  size_t size;
687 
688  /**
689  * The number of objects of such size that were sampled.
690  */
691  unsigned int count;
692 
693  /**
694  * Unique time-ordered id of the allocation sample. Can be used to track
695  * what samples were added or removed between two snapshots.
696  */
697  uint64_t sample_id;
698  };
699 
700  /**
701  * Returns the root node of the call-graph. The root node corresponds to an
702  * empty JS call-stack. The lifetime of the returned Node* is scoped to the
703  * containing AllocationProfile.
704  */
705  virtual Node* GetRootNode() = 0;
706  virtual const std::vector<Sample>& GetSamples() = 0;
707 
708  virtual ~AllocationProfile() = default;
709 
// NOTE(review): the kNoLineNumberInfo / kNoColumnNumberInfo constants
// referenced in the Node comments above appear to have been dropped here
// (rendered lines 710-711) — confirm upstream.
712 };
713 
714 /**
715  * An object graph consisting of embedder objects and V8 objects.
716  * Edges of the graph are strong references between the objects.
717  * The embedder can build this graph during heap snapshot generation
718  * to include the embedder objects in the heap snapshot.
719  * Usage:
720  * 1) Define derived class of EmbedderGraph::Node for embedder objects.
721  * 2) Set the build embedder graph callback on the heap profiler using
722  * HeapProfiler::AddBuildEmbedderGraphCallback.
723  * 3) In the callback use graph->AddEdge(node1, node2) to add an edge from
724  * node1 to node2.
725  * 4) To represent references from/to V8 object, construct V8 nodes using
726  * graph->V8Node(value).
727  */
// NOTE(review): the `class V8_EXPORT EmbedderGraph {` opener was dropped by
// the extraction; the nested Node class below is intact.
729  public:
730  class Node {
731  public:
732  Node() = default;
733  virtual ~Node() = default;
734  virtual const char* Name() = 0;
735  virtual size_t SizeInBytes() = 0;
736  /**
737  * The corresponding V8 wrapper node if not null.
738  * During heap snapshot generation the embedder node and the V8 wrapper
739  * node will be merged into one node to simplify retaining paths.
740  */
741  virtual Node* WrapperNode() { return nullptr; }
742  virtual bool IsRootNode() { return false; }
743  /** Must return true for non-V8 nodes. */
744  virtual bool IsEmbedderNode() { return true; }
745  /**
746  * Optional name prefix. It is used in Chrome for tagging detached nodes.
747  */
748  virtual const char* NamePrefix() { return nullptr; }
749 
750  /**
751  * Returns the NativeObject that can be used for querying the
752  * |HeapSnapshot|.
753  */
754  virtual NativeObject GetNativeObject() { return nullptr; }
755 
756  Node(const Node&) = delete;
757  Node& operator=(const Node&) = delete;
758  };
759 
760  /**
761  * Returns a node corresponding to the given V8 value. Ownership is not
762  * transferred. The result pointer is valid while the graph is alive.
763  */
764  virtual Node* V8Node(const v8::Local<v8::Value>& value) = 0;
765 
766  /**
767  * Adds the given node to the graph and takes ownership of the node.
768  * Returns a raw pointer to the node that is valid while the graph is alive.
769  */
770  virtual Node* AddNode(std::unique_ptr<Node> node) = 0;
771 
772  /**
773  * Adds an edge that represents a strong reference from the given
774  * node |from| to the given node |to|. The nodes must be added to the graph
775  * before calling this function.
776  *
777  * If name is nullptr, the edge will have auto-increment indexes, otherwise
778  * it will be named accordingly.
779  */
780  virtual void AddEdge(Node* from, Node* to, const char* name = nullptr) = 0;
781 
782  virtual ~EmbedderGraph() = default;
783 };
784 
785 /**
786  * Interface for controlling heap profiling. Instance of the
787  * profiler can be retrieved using v8::Isolate::GetHeapProfiler.
788  */
// NOTE(review): the `class V8_EXPORT HeapProfiler {` opener was dropped by
// the extraction — confirm upstream.
790  public:
// NOTE(review): the opener and enumerators of a nested enum (rendered lines
// 791-793) were dropped here; the stray `};` below is its closer.
794  };
795 
796  /**
797  * Callback function invoked during heap snapshot generation to retrieve
798  * the embedder object graph. The callback should use graph->AddEdge(..) to
799  * add references between the objects.
800  * The callback must not trigger garbage collection in V8.
801  */
802  typedef void (*BuildEmbedderGraphCallback)(v8::Isolate* isolate,
803  v8::EmbedderGraph* graph,
804  void* data);
805 
806  /** Returns the number of snapshots taken. */
// NOTE(review): the snapshot-count accessor documented above is missing from
// this excerpt.
808 
809  /** Returns a snapshot by index. */
810  const HeapSnapshot* GetHeapSnapshot(int index);
811 
812  /**
813  * Returns SnapshotObjectId for a heap object referenced by |value| if
814  * it has been seen by the heap profiler, kUnknownObjectId otherwise.
815  */
816  SnapshotObjectId GetObjectId(Local<Value> value);
817 
818  /**
819  * Returns SnapshotObjectId for a native object referenced by |value| if it
820  * has been seen by the heap profiler, kUnknownObjectId otherwise.
821  */
822  SnapshotObjectId GetObjectId(NativeObject value);
823 
824  /**
825  * Returns heap object with given SnapshotObjectId if the object is alive,
826  * otherwise empty handle is returned.
827  */
828  Local<Value> FindObjectById(SnapshotObjectId id);
829 
830  /**
831  * Clears internal map from SnapshotObjectId to heap object. The new objects
832  * will not be added into it unless a heap snapshot is taken or heap object
833  * tracking is kicked off.
834  */
// NOTE(review): the declaration documented above is missing from this
// excerpt.
836 
837  /**
838  * A constant for invalid SnapshotObjectId. GetSnapshotObjectId will return
839  * it in case heap profiler cannot find id for the object passed as
840  * parameter. HeapSnapshot::GetNodeById will always return NULL for such id.
841  */
842  static const SnapshotObjectId kUnknownObjectId = 0;
843 
844  /**
845  * Callback interface for retrieving user friendly names of global objects.
846  */
// NOTE(review): the `class V8_EXPORT ObjectNameResolver {` opener was
// dropped here; the `public:` section below belongs to that nested class.
848  public:
849  /**
850  * Returns name to be used in the heap snapshot for given node. Returned
851  * string must stay alive until snapshot collection is completed.
852  */
853  virtual const char* GetName(Local<Object> object) = 0;
854 
855  protected:
856  virtual ~ObjectNameResolver() = default;
857  };
858 
859  /**
860  * Takes a heap snapshot and returns it.
861  */
// NOTE(review): the opener of the snapshot-taking declaration (rendered line
// 862) is missing; only its trailing parameters survived.
863  ActivityControl* control = nullptr,
864  ObjectNameResolver* global_object_name_resolver = nullptr);
865 
866  /**
867  * Starts tracking of heap objects population statistics. After calling
868  * this method, all heap objects relocations done by the garbage collector
869  * are being registered.
870  *
871  * |track_allocations| parameter controls whether stack trace of each
872  * allocation in the heap will be recorded and reported as part of
873  * HeapSnapshot.
874  */
875  void StartTrackingHeapObjects(bool track_allocations = false);
876 
877  /**
878  * Adds a new time interval entry to the aggregated statistics array. The
879  * time interval entry contains information on the current heap objects
880  * population size. The method also updates aggregated statistics and
881  * reports updates for all previous time intervals via the OutputStream
882  * object. Updates on each time interval are provided as a stream of the
883  * HeapStatsUpdate structure instances.
884  * If |timestamp_us| is supplied, timestamp of the new entry will be written
885  * into it. The return value of the function is the last seen heap object Id.
886  *
887  * StartTrackingHeapObjects must be called before the first call to this
888  * method.
889  */
890  SnapshotObjectId GetHeapStats(OutputStream* stream,
891  int64_t* timestamp_us = nullptr);
892 
893  /**
894  * Stops tracking of heap objects population statistics, cleans up all
895  * collected data. StartHeapObjectsTracking must be called again prior to
896  * calling GetHeapStats next time.
897  */
// NOTE(review): the declaration documented above is missing from this
// excerpt.
899 
900  /**
901  * Starts gathering a sampling heap profile. A sampling heap profile is
902  * similar to tcmalloc's heap profiler and Go's mprof. It samples object
903  * allocations and builds an online 'sampling' heap profile. At any point in
904  * time, this profile is expected to be a representative sample of objects
905  * currently live in the system. Each sampled allocation includes the stack
906  * trace at the time of allocation, which makes this really useful for memory
907  * leak detection.
908  *
909  * This mechanism is intended to be cheap enough that it can be used in
910  * production with minimal performance overhead.
911  *
912  * Allocations are sampled using a randomized Poisson process. On average, one
913  * allocation will be sampled every |sample_interval| bytes allocated. The
914  * |stack_depth| parameter controls the maximum number of stack frames to be
915  * captured on each allocation.
916  *
917  * NOTE: This is a proof-of-concept at this point. Right now we only sample
918  * newspace allocations. Support for paged space allocation (e.g. pre-tenured
919  * objects, large objects, code objects, etc.) and native allocations
920  * doesn't exist yet, but is anticipated in the future.
921  *
922  * Objects allocated before the sampling is started will not be included in
923  * the profile.
924  *
925  * Returns false if a sampling heap profiler is already running.
926  */
927  bool StartSamplingHeapProfiler(uint64_t sample_interval = 512 * 1024,
928  int stack_depth = 16,
// NOTE(review): the final parameter of StartSamplingHeapProfiler (rendered
// line 929) is missing from this excerpt.
930 
931  /**
932  * Stops the sampling heap profile and discards the current profile.
933  */
// NOTE(review): the declaration documented above is missing from this
// excerpt.
935 
936  /**
937  * Returns the sampled profile of allocations allocated (and still live) since
938  * StartSamplingHeapProfiler was called. The ownership of the pointer is
939  * transferred to the caller. Returns nullptr if sampling heap profiler is not
940  * active.
941  */
// NOTE(review): the declaration documented above is missing from this
// excerpt.
943 
944  /**
945  * Deletes all snapshots taken. All previously returned pointers to
946  * snapshots and their contents become invalid after this call.
947  */
// NOTE(review): the declaration documented above is missing from this
// excerpt.
949 
// NOTE(review): two callback-registration declarations were truncated here
// (rendered lines 950 and 952); only their trailing `void* data);` parameter
// lines survived.
951  void* data);
953  void* data);
954 
955  /**
956  * Default value of persistent handle class ID. Must not be used to
957  * define a class. Can be used to reset a class of a persistent
958  * handle.
959  */
960  static const uint16_t kPersistentHandleNoClassId = 0;
961 
962  private:
963  HeapProfiler();
964  ~HeapProfiler();
965  HeapProfiler(const HeapProfiler&);
966  HeapProfiler& operator=(const HeapProfiler&);
967 };
968 
969 /**
970  * A struct for exporting HeapStats data from V8, using "push" model.
971  * See HeapProfiler::GetHeapStats.
972  */
974  HeapStatsUpdate(uint32_t index, uint32_t count, uint32_t size)
975  : index(index), count(count), size(size) { }
976  uint32_t index; // Index of the time interval that was changed.
977  uint32_t count; // New value of count field for the interval with this index.
978  uint32_t size; // New value of size field for the interval with this index.
979 };
980 
// X-macro list of code-creation event kinds; expanded below to build the
// CodeEventType enum.
// NOTE(review): the backslash line-continuations were missing from this
// excerpt; without them the #define is empty and every V(...) line is a
// stray statement. Restored.
#define CODE_EVENTS_LIST(V) \
  V(Builtin)                \
  V(Callback)               \
  V(Eval)                   \
  V(Function)               \
  V(InterpretedFunction)    \
  V(Handler)                \
  V(BytecodeHandler)        \
  V(LazyCompile)            \
  V(RegExp)                 \
  V(Script)                 \
  V(Stub)
993 
994 /**
995  * Note that this enum may be extended in the future. Please include a default
996  * case if this enum is used in a switch statement.
997  */
// NOTE(review): the enum opener (the type is referenced by
// CodeEvent::GetCodeEventTypeName below) and the list-macro invocation that
// belongs between the #define/#undef pair were dropped by the extraction —
// confirm upstream.
999  kUnknownType = 0
1000 #define V(Name) , k##Name##Type
1002 #undef V
1003 };
1004 
1005 /**
1006  * Representation of a code creation event
1007  */
// NOTE(review): the `class V8_EXPORT CodeEvent {` opener and several
// accessor declarations (rendered lines 1008, 1012-1015 and 1021) were
// dropped by the extraction — confirm upstream.
1009  public:
1010  uintptr_t GetCodeStartAddress();
1011  size_t GetCodeSize();
1016  /**
1017  * NOTE (mmarchini): We can't allocate objects in the heap when we collect
1018  * existing code, and both the code type and the comment are not stored in the
1019  * heap, so we return those as const char*.
1020  */
// NOTE(review): the comment above mentions "the code type and the comment",
// but only the comment accessor survives here; the code-type accessor
// appears to be among the dropped lines.
1022  const char* GetComment();
1023 
1024  static const char* GetCodeEventTypeName(CodeEventType code_event_type);
1025 };
1026 
1027 /**
1028  * Interface to listen to code creation events.
1029  */
// NOTE(review): the `class V8_EXPORT CodeEventHandler {` opener was dropped
// by the extraction — confirm upstream.
1031  public:
1032  /**
1033  * Creates a new listener for the |isolate|. The isolate must be initialized.
1034  * The listener object must be disposed after use by calling |Dispose| method.
1035  * Multiple listeners can be created for the same isolate.
1036  */
// NOTE(review): the comment above mentions a |Dispose| method, but no
// Dispose is declared in this excerpt (only Enable/Disable) — either the
// declaration was dropped or the comment is stale; confirm upstream.
1037  explicit CodeEventHandler(Isolate* isolate);
1038  virtual ~CodeEventHandler();
1039 
1040  virtual void Handle(CodeEvent* code_event) = 0;
1041 
1042  void Enable();
1043  void Disable();
1044 
1045  private:
1046  CodeEventHandler();
1047  CodeEventHandler(const CodeEventHandler&);
1048  CodeEventHandler& operator=(const CodeEventHandler&);
1049  void* internal_listener_;
1050 };
1051 
1052 } // namespace v8
1053 
1054 
1055 #endif // V8_V8_PROFILER_H_