v8  10.1.124 (node 18.2.0)
V8 is Google's open source JavaScript engine
v8-profiler.h
// Copyright 2010 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_V8_PROFILER_H_
#define V8_V8_PROFILER_H_

#include <limits.h>

#include <memory>
#include <unordered_set>
#include <vector>

#include "v8-local-handle.h"       // NOLINT(build/include_directory)
#include "v8-message.h"            // NOLINT(build/include_directory)
#include "v8-persistent-handle.h"  // NOLINT(build/include_directory)

/**
 * Profiler support for the V8 JavaScript engine.
 */
namespace v8 {

enum class EmbedderStateTag : uint8_t;
class HeapGraphNode;
struct HeapStatsUpdate;
class Object;
enum StateTag : int;

using NativeObject = void*;
using SnapshotObjectId = uint32_t;

struct CpuProfileDeoptFrame {
  int script_id;
  size_t position;
};

namespace internal {
class CpuProfile;
}  // namespace internal

}  // namespace v8

#ifdef V8_OS_WIN
template class V8_EXPORT std::vector<v8::CpuProfileDeoptFrame>;
#endif

namespace v8 {

struct V8_EXPORT CpuProfileDeoptInfo {
  /** A pointer to a static string owned by v8. */
  const char* deopt_reason;
  std::vector<CpuProfileDeoptFrame> stack;
};

}  // namespace v8

#ifdef V8_OS_WIN
template class V8_EXPORT std::vector<v8::CpuProfileDeoptInfo>;
#endif

namespace v8 {

/**
 * CpuProfileNode represents a node in a call graph.
 */
class V8_EXPORT CpuProfileNode {
 public:
  struct LineTick {
    /** The 1-based number of the source line where the function originates. */
    int line;

    /** The count of samples associated with the source line. */
    unsigned int hit_count;
  };

  // An annotation hinting at the source of a CpuProfileNode.
  enum SourceType {
    // User-supplied script with associated resource information.
    kScript = 0,
    // Native scripts and provided builtins.
    kBuiltin = 1,
    // Callbacks into native code.
    kCallback = 2,
    // VM-internal functions or state.
    kInternal = 3,
    // A node that failed to symbolize.
    kUnresolved = 4,
  };

  /** Returns the function name (empty string for anonymous functions). */
  Local<String> GetFunctionName() const;

  /**
   * Returns the function name (empty string for anonymous functions).
   * The string ownership is *not* passed to the caller. It stays valid until
   * the profile is deleted. The function is thread safe.
   */
  const char* GetFunctionNameStr() const;

  /** Returns the id of the script where the function is located. */
  int GetScriptId() const;

  /** Returns the resource name of the script from which the function originates. */
  Local<String> GetScriptResourceName() const;

  /**
   * Returns the resource name of the script from which the function originates.
   * The string ownership is *not* passed to the caller. It stays valid until
   * the profile is deleted. The function is thread safe.
   */
  const char* GetScriptResourceNameStr() const;

  /**
   * Returns true if the script from which the function originates is flagged
   * as being shared cross-origin.
   */
  bool IsScriptSharedCrossOrigin() const;

  /**
   * Returns the 1-based number of the line where the function originates.
   * kNoLineNumberInfo if no line number information is available.
   */
  int GetLineNumber() const;

  /**
   * Returns the 1-based number of the column where the function originates.
   * kNoColumnNumberInfo if no column number information is available.
   */
  int GetColumnNumber() const;

  /**
   * Returns the number of the function's source lines for which samples were
   * collected.
   */
  unsigned int GetHitLineCount() const;

  /** Returns the set of source lines for which samples were collected.
   * The caller allocates the buffer and is responsible for releasing it.
   * Returns true if all available entries were copied, otherwise false.
   * The function copies nothing if the buffer is not large enough.
   */
  bool GetLineTicks(LineTick* entries, unsigned int length) const;

  /** Returns the bailout reason for the function
   * if the optimization was disabled for it.
   */
  const char* GetBailoutReason() const;

  /**
   * Returns the count of samples where the function was currently executing.
   */
  unsigned GetHitCount() const;

  /** Returns the id of the node. The id is unique within the tree. */
  unsigned GetNodeId() const;

  /**
   * Gets the type of the source from which the node was captured.
   */
  SourceType GetSourceType() const;

  /** Returns the child node count of the node. */
  int GetChildrenCount() const;

  /** Retrieves a child node by index. */
  const CpuProfileNode* GetChild(int index) const;

  /** Retrieves the ancestor node, or null for the root. */
  const CpuProfileNode* GetParent() const;

  /** Retrieves deopt infos for the node. */
  const std::vector<CpuProfileDeoptInfo>& GetDeoptInfos() const;

  static const int kNoLineNumberInfo = Message::kNoLineNumberInfo;
  static const int kNoColumnNumberInfo = Message::kNoColumnInfo;
};
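
// A minimal usage sketch (not part of v8-profiler.h): walking a call tree the
// caller already holds and printing per-line hit counts. GetHitLineCount() sizes
// the caller-owned buffer that GetLineTicks() fills; nothing is copied if the
// buffer is too small. Assumes <cstdio> and <vector> are available.
void DumpLineTicks(const v8::CpuProfileNode* node) {
  unsigned int count = node->GetHitLineCount();
  std::vector<v8::CpuProfileNode::LineTick> ticks(count);
  if (count > 0 && node->GetLineTicks(ticks.data(), count)) {
    for (const auto& tick : ticks) {
      printf("line %d: %u samples\n", tick.line, tick.hit_count);
    }
  }
  for (int i = 0; i < node->GetChildrenCount(); ++i) {
    DumpLineTicks(node->GetChild(i));  // recurse into the call tree
  }
}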


/**
 * CpuProfile contains a CPU profile in a form of top-down call tree
 * (from main() down to functions that do all the work).
 */
class V8_EXPORT CpuProfile {
 public:
  /** Returns the CPU profile title. */
  Local<String> GetTitle() const;

  /** Returns the root node of the top down call tree. */
  const CpuProfileNode* GetTopDownRoot() const;

  /**
   * Returns the number of samples recorded. The samples are not recorded
   * unless the |record_samples| parameter of CpuProfiler::StartProfiling is
   * true.
   */
  int GetSamplesCount() const;

  /**
   * Returns the profile node corresponding to the top frame of the sample at
   * the given index.
   */
  const CpuProfileNode* GetSample(int index) const;

  /**
   * Returns the timestamp of the sample. The timestamp is the number of
   * microseconds since some unspecified starting point.
   * The point is equal to the starting point used by GetStartTime.
   */
  int64_t GetSampleTimestamp(int index) const;

  /**
   * Returns the time when the profile recording was started (in microseconds)
   * since some unspecified starting point.
   */
  int64_t GetStartTime() const;

  /**
   * Returns the state of the VM when the sample was captured.
   */
  StateTag GetSampleState(int index) const;

  /**
   * Returns the state of the embedder when the sample was captured.
   */
  EmbedderStateTag GetSampleEmbedderState(int index) const;

  /**
   * Returns the time when the profile recording was stopped (in microseconds)
   * since some unspecified starting point.
   * The point is equal to the starting point used by GetStartTime.
   */
  int64_t GetEndTime() const;

  /**
   * Deletes the profile and removes it from CpuProfiler's list.
   * All pointers to nodes previously returned become invalid.
   */
  void Delete();
};
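
// A minimal usage sketch (not part of v8-profiler.h): walking the recorded
// samples of a finished profile. Samples are only present when they were
// requested at the time profiling started. Assumes <cstdio> is available.
void DumpSamples(const v8::CpuProfile* profile) {
  for (int i = 0; i < profile->GetSamplesCount(); ++i) {
    const v8::CpuProfileNode* top = profile->GetSample(i);
    int64_t offset_us = profile->GetSampleTimestamp(i) - profile->GetStartTime();
    printf("+%lld us  %s\n", static_cast<long long>(offset_us),
           top->GetFunctionNameStr());
  }
}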

enum CpuProfilingMode {
  // In the resulting CpuProfile tree, intermediate nodes in a stack trace
  // (from the root to a leaf) will have line numbers that point to the start
  // line of the function, rather than the line of the callsite of the child.
  kLeafNodeLineNumbers,
  // In the resulting CpuProfile tree, nodes are separated based on the line
  // number of their callsite in their parent.
  kCallerLineNumbers,
};

// Determines how names are derived for functions sampled.
enum CpuProfilingNamingMode {
  // Use the immediate name of functions at compilation time.
  kStandardNaming,
  // Use more verbose naming for functions without names, inferred from scope
  // where possible.
  kDebugNaming,
};

enum CpuProfilingLoggingMode {
  // Enables logging when a profile is active, and disables logging when all
  // profiles are detached.
  kLazyLogging,
  // Enables logging for the lifetime of the CpuProfiler. Calls to
  // StartRecording are faster, at the expense of runtime overhead.
  kEagerLogging,
};

// Enum for returning profiling status. Once StartProfiling is called,
// we want to return to clients whether the profiling was able to start
// correctly, or return a descriptive error.
enum class CpuProfilingStatus {
  kStarted,
  kAlreadyStarted,
  kErrorTooManyProfilers
};

/**
 * Delegate for when max samples reached and samples are discarded.
 */
class V8_EXPORT DiscardedSamplesDelegate {
 public:
  DiscardedSamplesDelegate() = default;

  virtual ~DiscardedSamplesDelegate() = default;
  virtual void Notify() = 0;
};
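
// A minimal usage sketch (not part of v8-profiler.h): a delegate that logs when
// the profiler starts discarding samples because max_samples was reached.
// Assumes <cstdio> is available.
class LoggingDiscardedSamplesDelegate final
    : public v8::DiscardedSamplesDelegate {
 public:
  void Notify() override {
    fprintf(stderr, "CPU profile sample limit reached\n");
  }
};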

/**
 * Optional profiling attributes.
 */
class V8_EXPORT CpuProfilingOptions {
 public:
  // Indicates that the sample buffer size should not be explicitly limited.
  static const unsigned kNoSampleLimit = UINT_MAX;

  /**
   * \param mode Type of computation of stack frame line numbers.
   * \param max_samples The maximum number of samples that should be recorded by
   *                    the profiler. Samples obtained after this limit will be
   *                    discarded.
   * \param sampling_interval_us controls the profile-specific target
   *                             sampling interval. The provided sampling
   *                             interval will be snapped to the next lowest
   *                             non-zero multiple of the profiler's sampling
   *                             interval, set via SetSamplingInterval(). If
   *                             zero, the sampling interval will be equal to
   *                             the profiler's sampling interval.
   * \param filter_context If specified, profiles will only contain frames
   *                       using this context. Other frames will be elided.
   */
  CpuProfilingOptions(
      CpuProfilingMode mode = kLeafNodeLineNumbers,
      unsigned max_samples = kNoSampleLimit, int sampling_interval_us = 0,
      MaybeLocal<Context> filter_context = MaybeLocal<Context>());

  CpuProfilingMode mode() const { return mode_; }
  unsigned max_samples() const { return max_samples_; }
  int sampling_interval_us() const { return sampling_interval_us_; }

 private:
  friend class internal::CpuProfile;

  bool has_filter_context() const { return !filter_context_.IsEmpty(); }
  void* raw_filter_context() const;

  CpuProfilingMode mode_;
  unsigned max_samples_;
  int sampling_interval_us_;
  CopyablePersistentTraits<Context>::CopyablePersistent filter_context_;
};
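
// A minimal usage sketch (not part of v8-profiler.h): profile-specific options
// that separate nodes by callsite line, cap the sample buffer at 10,000 samples,
// and request a 500 us interval (snapped to a multiple of the profiler-wide
// interval set via CpuProfiler::SetSamplingInterval()).
v8::CpuProfilingOptions MakeCappedOptions() {
  return v8::CpuProfilingOptions(v8::kCallerLineNumbers,
                                 /*max_samples=*/10000,
                                 /*sampling_interval_us=*/500);
}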

/**
 * Interface for controlling CPU profiling. Instances of the
 * profiler can be created using the v8::CpuProfiler::New method.
 */
class V8_EXPORT CpuProfiler {
 public:
  /**
   * Creates a new CPU profiler for the |isolate|. The isolate must be
   * initialized. The profiler object must be disposed after use by calling
   * the |Dispose| method.
   */
  static CpuProfiler* New(Isolate* isolate,
                          CpuProfilingNamingMode = kDebugNaming,
                          CpuProfilingLoggingMode = kLazyLogging);

  /**
   * Synchronously collects a current stack sample in all profilers attached to
   * the |isolate|. The call does not affect the number of ticks recorded for
   * the current top node.
   */
  static void CollectSample(Isolate* isolate);

  /**
   * Disposes the CPU profiler object.
   */
  void Dispose();

  /**
   * Changes the default CPU profiler sampling interval to the specified number
   * of microseconds. The default interval is 1000us. This method must be
   * called when there are no profiles being recorded.
   */
  void SetSamplingInterval(int us);

  /**
   * Sets whether or not the profiler should prioritize consistency of sample
   * periodicity on Windows. Disabling this can greatly reduce CPU usage, but
   * may result in greater variance in sample timings from the platform's
   * scheduler. Defaults to enabled. This method must be called when there are
   * no profiles being recorded.
   */
  void SetUsePreciseSampling(bool);

  /**
   * Starts collecting a CPU profile. Title may be an empty string. Several
   * profiles may be collected at once. Attempts to start collecting several
   * profiles with the same title are silently ignored.
   */
  CpuProfilingStatus StartProfiling(
      Local<String> title, CpuProfilingOptions options,
      std::unique_ptr<DiscardedSamplesDelegate> delegate = nullptr);

  /**
   * Starts profiling with the same semantics as above, except with expanded
   * parameters.
   *
   * |record_samples| parameter controls whether individual samples should
   * be recorded in addition to the aggregated tree.
   *
   * |max_samples| controls the maximum number of samples that should be
   * recorded by the profiler. Samples obtained after this limit will be
   * discarded.
   */
  CpuProfilingStatus StartProfiling(
      Local<String> title, CpuProfilingMode mode, bool record_samples = false,
      unsigned max_samples = CpuProfilingOptions::kNoSampleLimit);
  /**
   * The same as StartProfiling above, but the CpuProfilingMode defaults to
   * kLeafNodeLineNumbers mode, which was the previous default behavior of the
   * profiler.
   */
  CpuProfilingStatus StartProfiling(Local<String> title,
                                    bool record_samples = false);

  /**
   * Stops collecting the CPU profile with the given title and returns it.
   * If the title given is empty, finishes the last profile started.
   */
  CpuProfile* StopProfiling(Local<String> title);

  /**
   * Generate more detailed source positions to code objects. This results in
   * better results when mapping profiling samples to script source.
   */
  static void UseDetailedSourcePositionsForProfiling(Isolate* isolate);

 private:
  CpuProfiler();
  ~CpuProfiler();
  CpuProfiler(const CpuProfiler&);
  CpuProfiler& operator=(const CpuProfiler&);
};
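
// A minimal sketch of the profiler lifecycle described above (not part of
// v8-profiler.h), assuming an entered isolate with an active HandleScope and
// <cstdio>; error handling is omitted.
void ProfileWorkload(v8::Isolate* isolate) {
  v8::CpuProfiler* profiler = v8::CpuProfiler::New(isolate);
  profiler->SetSamplingInterval(500);  // microseconds; set before recording
  v8::Local<v8::String> title =
      v8::String::NewFromUtf8Literal(isolate, "workload");
  profiler->StartProfiling(title, /*record_samples=*/true);
  // ... run the code of interest ...
  v8::CpuProfile* profile = profiler->StopProfiling(title);
  printf("%d samples recorded\n", profile->GetSamplesCount());
  // Inspect the top-down tree via profile->GetTopDownRoot(), then release it.
  profile->Delete();    // invalidates all node pointers from this profile
  profiler->Dispose();  // the profiler must be disposed after use
}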

/**
 * HeapGraphEdge represents a directed connection between heap
 * graph nodes: from retainers to retained nodes.
 */
class V8_EXPORT HeapGraphEdge {
 public:
  enum Type {
    kContextVariable = 0,  // A variable from a function context.
    kElement = 1,          // An element of an array.
    kProperty = 2,         // A named object property.
    kInternal = 3,         // A link that can't be accessed from JS,
                           // thus, its name isn't a real property name
                           // (e.g. parts of a ConsString).
    kHidden = 4,           // A link that is needed for proper sizes
                           // calculation, but may be hidden from user.
    kShortcut = 5,         // A link that must not be followed during
                           // sizes calculation.
    kWeak = 6              // A weak reference (ignored by the GC).
  };

  /** Returns edge type (see HeapGraphEdge::Type). */
  Type GetType() const;

  /**
   * Returns edge name. This can be a variable name, an element index, or
   * a property name.
   */
  Local<Value> GetName() const;

  /** Returns origin node. */
  const HeapGraphNode* GetFromNode() const;

  /** Returns destination node. */
  const HeapGraphNode* GetToNode() const;
};


/**
 * HeapGraphNode represents a node in a heap graph.
 */
class V8_EXPORT HeapGraphNode {
 public:
  enum Type {
    kHidden = 0,         // Hidden node, may be filtered when shown to user.
    kArray = 1,          // An array of elements.
    kString = 2,         // A string.
    kObject = 3,         // A JS object (except for arrays and strings).
    kCode = 4,           // Compiled code.
    kClosure = 5,        // Function closure.
    kRegExp = 6,         // RegExp.
    kHeapNumber = 7,     // Number stored in the heap.
    kNative = 8,         // Native object (not from V8 heap).
    kSynthetic = 9,      // Synthetic object, usually used for grouping
                         // snapshot items together.
    kConsString = 10,    // Concatenated string. A pair of pointers to strings.
    kSlicedString = 11,  // Sliced string. A fragment of another string.
    kSymbol = 12,        // A Symbol (ES6).
    kBigInt = 13         // BigInt.
  };

  /** Returns node type (see HeapGraphNode::Type). */
  Type GetType() const;

  /**
   * Returns node name. Depending on the node's type this can be the name
   * of the constructor (for objects), the name of the function (for
   * closures), string value, or an empty string (for compiled code).
   */
  Local<String> GetName() const;

  /**
   * Returns node id. For the same heap object, the id remains the same
   * across all snapshots.
   */
  SnapshotObjectId GetId() const;

  /** Returns node's own size, in bytes. */
  size_t GetShallowSize() const;

  /** Returns the child node count of the node. */
  int GetChildrenCount() const;

  /** Retrieves a child by index. */
  const HeapGraphEdge* GetChild(int index) const;
};


/**
 * An interface for exporting data from V8, using the "push" model.
 */
class V8_EXPORT OutputStream {
 public:
  enum WriteResult {
    kContinue = 0,
    kAbort = 1
  };
  virtual ~OutputStream() = default;
  /** Notify about the end of stream. */
  virtual void EndOfStream() = 0;
  /** Get preferred output chunk size. Called only once. */
  virtual int GetChunkSize() { return 1024; }
  /**
   * Writes the next chunk of snapshot data into the stream. Writing
   * can be stopped by returning kAbort as the function result. EndOfStream
   * will not be called if writing was aborted.
   */
  virtual WriteResult WriteAsciiChunk(char* data, int size) = 0;
  /**
   * Writes the next chunk of heap stats data into the stream. Writing
   * can be stopped by returning kAbort as the function result. EndOfStream
   * will not be called if writing was aborted.
   */
  virtual WriteResult WriteHeapStatsChunk(HeapStatsUpdate* data, int count) {
    return kAbort;
  }
};
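
// A minimal usage sketch (not part of v8-profiler.h): an OutputStream that
// accumulates serialized snapshot JSON in memory. WriteHeapStatsChunk keeps its
// default kAbort behaviour because this stream is only meant for
// HeapSnapshot::Serialize. Assumes <string> is available.
class StringOutputStream : public v8::OutputStream {
 public:
  void EndOfStream() override {}
  WriteResult WriteAsciiChunk(char* data, int size) override {
    json_.append(data, size);
    return kContinue;
  }
  const std::string& json() const { return json_; }

 private:
  std::string json_;
};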

/**
 * HeapSnapshots record the state of the JS heap at some moment.
 */
class V8_EXPORT HeapSnapshot {
 public:
  enum SerializationFormat {
    kJSON = 0  // See format description near 'Serialize' method.
  };

  /** Returns the root node of the heap graph. */
  const HeapGraphNode* GetRoot() const;

  /** Returns a node by its id. */
  const HeapGraphNode* GetNodeById(SnapshotObjectId id) const;

  /** Returns the total node count in the snapshot. */
  int GetNodesCount() const;

  /** Returns a node by index. */
  const HeapGraphNode* GetNode(int index) const;

  /** Returns the max seen JS object Id. */
  SnapshotObjectId GetMaxSnapshotJSObjectId() const;

  /**
   * Deletes the snapshot and removes it from HeapProfiler's list.
   * All pointers to nodes, edges and paths previously returned become
   * invalid.
   */
  void Delete();

  /**
   * Prepare a serialized representation of the snapshot. The result
   * is written into the stream provided in chunks of the specified size.
   * The total length of the serialized snapshot is unknown in
   * advance, it can be roughly equal to the JS heap size (that means,
   * it can be really big - tens of megabytes).
   *
   * For the JSON format, heap contents are represented as an object
   * with the following structure:
   *
   *  {
   *    snapshot: {
   *      title: "...",
   *      uid: nnn,
   *      meta: { meta-info },
   *      node_count: nnn,
   *      edge_count: nnn
   *    },
   *    nodes: [nodes array],
   *    edges: [edges array],
   *    strings: [strings array]
   *  }
   *
   * Nodes reference strings, other nodes, and edges by their indexes
   * in corresponding arrays.
   */
  void Serialize(OutputStream* stream,
                 SerializationFormat format = kJSON) const;
};
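
// A minimal usage sketch (not part of v8-profiler.h): serializing a snapshot the
// embedder already holds into the JSON form described above, reusing the
// StringOutputStream sketch shown earlier. Assumes <cstdio> is available.
void DumpSnapshotJson(const v8::HeapSnapshot* snapshot) {
  StringOutputStream stream;
  snapshot->Serialize(&stream, v8::HeapSnapshot::kJSON);
  printf("%d nodes, %zu bytes of JSON\n", snapshot->GetNodesCount(),
         stream.json().size());
}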


/**
 * An interface for reporting progress and controlling long-running
 * activities.
 */
class V8_EXPORT ActivityControl {
 public:
  enum ControlOption {
    kContinue = 0,
    kAbort = 1
  };
  virtual ~ActivityControl() = default;
  /**
   * Notify about current progress. The activity can be stopped by
   * returning kAbort as the callback result.
   */
  virtual ControlOption ReportProgressValue(uint32_t done, uint32_t total) = 0;
};
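
// A minimal usage sketch (not part of v8-profiler.h): an ActivityControl that
// logs snapshot-generation progress and never aborts. Assumes <cstdio>.
class LoggingActivityControl final : public v8::ActivityControl {
 public:
  ControlOption ReportProgressValue(uint32_t done, uint32_t total) override {
    printf("heap snapshot progress: %u/%u\n", done, total);
    return kContinue;
  }
};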

/**
 * AllocationProfile is a sampled profile of allocations done by the program.
 * This is structured as a call-graph.
 */
class V8_EXPORT AllocationProfile {
 public:
  struct Allocation {
    /**
     * Size of the sampled allocation object.
     */
    size_t size;

    /**
     * The number of objects of such size that were sampled.
     */
    unsigned int count;
  };

  /**
   * Represents a node in the call-graph.
   */
  struct Node {
    /**
     * Name of the function. May be empty for anonymous functions or if the
     * script corresponding to this function has been unloaded.
     */
    Local<String> name;

    /**
     * Name of the script containing the function. May be empty if the script
     * name is not available, or if the script has been unloaded.
     */
    Local<String> script_name;

    /**
     * id of the script where the function is located. May be equal to
     * v8::UnboundScript::kNoScriptId in cases where the script doesn't exist.
     */
    int script_id;

    /**
     * Start position of the function in the script.
     */
    int start_position;

    /**
     * 1-indexed line number where the function starts. May be
     * kNoLineNumberInfo if no line number information is available.
     */
    int line_number;

    /**
     * 1-indexed column number where the function starts. May be
     * kNoColumnNumberInfo if no column number information is available.
     */
    int column_number;

    /**
     * Unique id of the node.
     */
    uint32_t node_id;

    /**
     * List of callees called from this node for which we have sampled
     * allocations. The lifetime of the children is scoped to the containing
     * AllocationProfile.
     */
    std::vector<Node*> children;

    /**
     * List of self allocations done by this node in the call-graph.
     */
    std::vector<Allocation> allocations;
  };

  /**
   * Represents a single sample recorded for an allocation.
   */
  struct Sample {
    /**
     * id of the node in the profile tree.
     */
    uint32_t node_id;

    /**
     * Size of the sampled allocation object.
     */
    size_t size;

    /**
     * The number of objects of such size that were sampled.
     */
    unsigned int count;

    /**
     * Unique time-ordered id of the allocation sample. Can be used to track
     * what samples were added or removed between two snapshots.
     */
    uint64_t sample_id;
  };

  /**
   * Returns the root node of the call-graph. The root node corresponds to an
   * empty JS call-stack. The lifetime of the returned Node* is scoped to the
   * containing AllocationProfile.
   */
  virtual Node* GetRootNode() = 0;
  virtual const std::vector<Sample>& GetSamples() = 0;

  virtual ~AllocationProfile() = default;

  static const int kNoLineNumberInfo = Message::kNoLineNumberInfo;
  static const int kNoColumnNumberInfo = Message::kNoColumnInfo;
};
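
// A minimal usage sketch (not part of v8-profiler.h): summing the sampled
// allocation bytes over the call-graph rooted at a node. Node and Allocation
// lifetimes are scoped to the containing AllocationProfile.
size_t SampledBytes(const v8::AllocationProfile::Node* node) {
  size_t total = 0;
  for (const auto& allocation : node->allocations) {
    total += allocation.size * allocation.count;
  }
  for (const v8::AllocationProfile::Node* child : node->children) {
    total += SampledBytes(child);
  }
  return total;
}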

/**
 * An object graph consisting of embedder objects and V8 objects.
 * Edges of the graph are strong references between the objects.
 * The embedder can build this graph during heap snapshot generation
 * to include the embedder objects in the heap snapshot.
 * Usage:
 * 1) Define a derived class of EmbedderGraph::Node for embedder objects.
 * 2) Set the build embedder graph callback on the heap profiler using
 *    HeapProfiler::AddBuildEmbedderGraphCallback.
 * 3) In the callback use graph->AddEdge(node1, node2) to add an edge from
 *    node1 to node2.
 * 4) To represent references from/to a V8 object, construct V8 nodes using
 *    graph->V8Node(value).
 */
class V8_EXPORT EmbedderGraph {
 public:
  class Node {
   public:
    /**
     * Detachedness specifies whether an object is attached or detached from
     * the main application state. While unknown in general, there may be
     * objects that specifically know their state. V8 passes this information
     * along in the snapshot. Users of the snapshot may use it to annotate the
     * object graph.
     */
    enum class Detachedness : uint8_t {
      kUnknown = 0,
      kAttached = 1,
      kDetached = 2,
    };

    Node() = default;
    virtual ~Node() = default;
    virtual const char* Name() = 0;
    virtual size_t SizeInBytes() = 0;
    /**
     * The corresponding V8 wrapper node if not null.
     * During heap snapshot generation the embedder node and the V8 wrapper
     * node will be merged into one node to simplify retaining paths.
     */
    virtual Node* WrapperNode() { return nullptr; }
    virtual bool IsRootNode() { return false; }
    /** Must return true for non-V8 nodes. */
    virtual bool IsEmbedderNode() { return true; }
    /**
     * Optional name prefix. It is used in Chrome for tagging detached nodes.
     */
    virtual const char* NamePrefix() { return nullptr; }

    /**
     * Returns the NativeObject that can be used for querying the
     * |HeapSnapshot|.
     */
    virtual NativeObject GetNativeObject() { return nullptr; }

    /**
     * Detachedness state of a given object. While unknown in general, there
     * may be objects that specifically know their state. V8 passes this
     * information along in the snapshot. Users of the snapshot may use it to
     * annotate the object graph.
     */
    virtual Detachedness GetDetachedness() { return Detachedness::kUnknown; }

    Node(const Node&) = delete;
    Node& operator=(const Node&) = delete;
  };

  /**
   * Returns a node corresponding to the given V8 value. Ownership is not
   * transferred. The result pointer is valid while the graph is alive.
   */
  virtual Node* V8Node(const v8::Local<v8::Value>& value) = 0;

  /**
   * Adds the given node to the graph and takes ownership of the node.
   * Returns a raw pointer to the node that is valid while the graph is alive.
   */
  virtual Node* AddNode(std::unique_ptr<Node> node) = 0;

  /**
   * Adds an edge that represents a strong reference from the given
   * node |from| to the given node |to|. The nodes must be added to the graph
   * before calling this function.
   *
   * If name is nullptr, the edge will have auto-increment indexes, otherwise
   * it will be named accordingly.
   */
  virtual void AddEdge(Node* from, Node* to, const char* name = nullptr) = 0;

  virtual ~EmbedderGraph() = default;
};
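
// A minimal usage sketch (not part of v8-profiler.h) following the four steps in
// the comment above EmbedderGraph: an embedder node plus a callback matching
// HeapProfiler::BuildEmbedderGraphCallback. LookUpWrapperObject is a
// hypothetical, embedder-specific helper, not a V8 API.
class MyEmbedderNode final : public v8::EmbedderGraph::Node {
 public:
  MyEmbedderNode(const char* name, size_t size) : name_(name), size_(size) {}
  const char* Name() override { return name_; }
  size_t SizeInBytes() override { return size_; }

 private:
  const char* name_;
  size_t size_;
};

v8::Local<v8::Value> LookUpWrapperObject(v8::Isolate* isolate, void* data);

void BuildEmbedderGraph(v8::Isolate* isolate, v8::EmbedderGraph* graph,
                        void* data) {
  // Add an embedder-owned node; the graph takes ownership of it.
  v8::EmbedderGraph::Node* native =
      graph->AddNode(std::make_unique<MyEmbedderNode>("MyResource", 4096));
  // Connect it to the V8 wrapper object it retains via a named edge.
  v8::Local<v8::Value> wrapper = LookUpWrapperObject(isolate, data);
  graph->AddEdge(native, graph->V8Node(wrapper), "wrapper");
}
// Registered later via HeapProfiler::AddBuildEmbedderGraphCallback(
//     BuildEmbedderGraph, data).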

/**
 * Interface for controlling heap profiling. An instance of the
 * profiler can be retrieved using v8::Isolate::GetHeapProfiler.
 */
class V8_EXPORT HeapProfiler {
 public:
  enum SamplingFlags {
    kSamplingNoFlags = 0,
    kSamplingForceGC = 1 << 0,
  };

  /**
   * Callback function invoked during heap snapshot generation to retrieve
   * the embedder object graph. The callback should use graph->AddEdge(..) to
   * add references between the objects.
   * The callback must not trigger garbage collection in V8.
   */
  typedef void (*BuildEmbedderGraphCallback)(v8::Isolate* isolate,
                                             v8::EmbedderGraph* graph,
                                             void* data);

  /**
   * Callback function invoked during heap snapshot generation to retrieve
   * the detachedness state of an object referenced by a TracedReference.
   *
   * The callback takes Local<Value> as parameter to allow the embedder to
   * unpack the TracedReference into a Local and reuse that Local for different
   * purposes.
   */
  using GetDetachednessCallback = EmbedderGraph::Node::Detachedness (*)(
      v8::Isolate* isolate, const v8::Local<v8::Value>& v8_value,
      uint16_t class_id, void* data);

  /** Returns the number of snapshots taken. */
  int GetSnapshotCount();

  /** Returns a snapshot by index. */
  const HeapSnapshot* GetHeapSnapshot(int index);

  /**
   * Returns the SnapshotObjectId for a heap object referenced by |value| if
   * it has been seen by the heap profiler, kUnknownObjectId otherwise.
   */
  SnapshotObjectId GetObjectId(Local<Value> value);

  /**
   * Returns the SnapshotObjectId for a native object referenced by |value| if
   * it has been seen by the heap profiler, kUnknownObjectId otherwise.
   */
  SnapshotObjectId GetObjectId(NativeObject value);

  /**
   * Returns the heap object with the given SnapshotObjectId if the object is
   * alive, otherwise an empty handle is returned.
   */
  Local<Value> FindObjectById(SnapshotObjectId id);

  /**
   * Clears the internal map from SnapshotObjectId to heap object. New objects
   * will not be added into it unless a heap snapshot is taken or heap object
   * tracking is kicked off.
   */
  void ClearObjectIds();

  /**
   * A constant for invalid SnapshotObjectId. GetSnapshotObjectId will return
   * it in case the heap profiler cannot find an id for the object passed as
   * parameter. HeapSnapshot::GetNodeById will always return NULL for such id.
   */
  static const SnapshotObjectId kUnknownObjectId = 0;

  /**
   * Callback interface for retrieving user friendly names of global objects.
   */
  class ObjectNameResolver {
   public:
    /**
     * Returns the name to be used in the heap snapshot for the given node. The
     * returned string must stay alive until snapshot collection is completed.
     */
    virtual const char* GetName(Local<Object> object) = 0;

   protected:
    virtual ~ObjectNameResolver() = default;
  };

  /**
   * Takes a heap snapshot and returns it.
   */
  const HeapSnapshot* TakeHeapSnapshot(
      ActivityControl* control = nullptr,
      ObjectNameResolver* global_object_name_resolver = nullptr,
      bool treat_global_objects_as_roots = true,
      bool capture_numeric_value = false);

  /**
   * Starts tracking of heap object population statistics. After calling
   * this method, all heap object relocations done by the garbage collector
   * are being registered.
   *
   * The |track_allocations| parameter controls whether the stack trace of each
   * allocation in the heap will be recorded and reported as part of
   * HeapSnapshot.
   */
  void StartTrackingHeapObjects(bool track_allocations = false);

  /**
   * Adds a new time interval entry to the aggregated statistics array. The
   * time interval entry contains information on the current heap object
   * population size. The method also updates aggregated statistics and
   * reports updates for all previous time intervals via the OutputStream
   * object. Updates on each time interval are provided as a stream of
   * HeapStatsUpdate structure instances.
   * If |timestamp_us| is supplied, the timestamp of the new entry will be
   * written into it. The return value of the function is the last seen heap
   * object Id.
   *
   * StartTrackingHeapObjects must be called before the first call to this
   * method.
   */
  SnapshotObjectId GetHeapStats(OutputStream* stream,
                                int64_t* timestamp_us = nullptr);

  /**
   * Stops tracking of heap object population statistics and cleans up all
   * collected data. StartTrackingHeapObjects must be called again prior to
   * calling GetHeapStats next time.
   */
  void StopTrackingHeapObjects();

  /**
   * Starts gathering a sampling heap profile. A sampling heap profile is
   * similar to tcmalloc's heap profiler and Go's mprof. It samples object
   * allocations and builds an online 'sampling' heap profile. At any point in
   * time, this profile is expected to be a representative sample of objects
   * currently live in the system. Each sampled allocation includes the stack
   * trace at the time of allocation, which makes this really useful for memory
   * leak detection.
   *
   * This mechanism is intended to be cheap enough that it can be used in
   * production with minimal performance overhead.
   *
   * Allocations are sampled using a randomized Poisson process. On average, one
   * allocation will be sampled every |sample_interval| bytes allocated. The
   * |stack_depth| parameter controls the maximum number of stack frames to be
   * captured on each allocation.
   *
   * NOTE: This is a proof-of-concept at this point. Right now we only sample
   * newspace allocations. Support for paged space allocation (e.g. pre-tenured
   * objects, large objects, code objects, etc.) and native allocations
   * doesn't exist yet, but is anticipated in the future.
   *
   * Objects allocated before the sampling is started will not be included in
   * the profile.
   *
   * Returns false if a sampling heap profiler is already running.
   */
  bool StartSamplingHeapProfiler(uint64_t sample_interval = 512 * 1024,
                                 int stack_depth = 16,
                                 SamplingFlags flags = kSamplingNoFlags);

  /**
   * Stops the sampling heap profiler and discards the current profile.
   */
  void StopSamplingHeapProfiler();

  /**
   * Returns the sampled profile of allocations made (and still live) since
   * StartSamplingHeapProfiler was called. The ownership of the pointer is
   * transferred to the caller. Returns nullptr if the sampling heap profiler
   * is not active.
   */
  AllocationProfile* GetAllocationProfile();

  /**
   * Deletes all snapshots taken. All previously returned pointers to
   * snapshots and their contents become invalid after this call.
   */
  void DeleteAllHeapSnapshots();

  void AddBuildEmbedderGraphCallback(BuildEmbedderGraphCallback callback,
                                     void* data);
  void RemoveBuildEmbedderGraphCallback(BuildEmbedderGraphCallback callback,
                                        void* data);

  void SetGetDetachednessCallback(GetDetachednessCallback callback, void* data);

  /**
   * Default value of persistent handle class ID. Must not be used to
   * define a class. Can be used to reset a class of a persistent
   * handle.
   */
  static const uint16_t kPersistentHandleNoClassId = 0;

 private:
  HeapProfiler();
  ~HeapProfiler();
  HeapProfiler(const HeapProfiler&);
  HeapProfiler& operator=(const HeapProfiler&);
};
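
// A minimal usage sketch (not part of v8-profiler.h): running the sampling heap
// profiler around a workload, sampling roughly one allocation per 256 KiB and
// reusing the SampledBytes sketch shown earlier. Assumes an initialized isolate
// and <cstdio>.
void SampleHeapDuringWorkload(v8::Isolate* isolate) {
  v8::HeapProfiler* heap_profiler = isolate->GetHeapProfiler();
  heap_profiler->StartSamplingHeapProfiler(/*sample_interval=*/256 * 1024,
                                           /*stack_depth=*/32);
  // ... run the workload of interest ...
  std::unique_ptr<v8::AllocationProfile> profile(
      heap_profiler->GetAllocationProfile());  // caller owns the profile
  if (profile) {
    printf("sampled bytes still live: %zu\n",
           SampledBytes(profile->GetRootNode()));
  }
  heap_profiler->StopSamplingHeapProfiler();
}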

/**
 * A struct for exporting HeapStats data from V8, using the "push" model.
 * See HeapProfiler::GetHeapStats.
 */
struct HeapStatsUpdate {
  HeapStatsUpdate(uint32_t index, uint32_t count, uint32_t size)
      : index(index), count(count), size(size) {}
  uint32_t index;  // Index of the time interval that was changed.
  uint32_t count;  // New value of count field for the interval with this index.
  uint32_t size;   // New value of size field for the interval with this index.
};
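
// A minimal usage sketch (not part of v8-profiler.h): an OutputStream that
// receives HeapStatsUpdate records pushed by HeapProfiler::GetHeapStats while
// heap object tracking is active. Assumes <cstdio> is available.
class HeapStatsStream final : public v8::OutputStream {
 public:
  void EndOfStream() override {}
  WriteResult WriteAsciiChunk(char* data, int size) override { return kAbort; }
  WriteResult WriteHeapStatsChunk(v8::HeapStatsUpdate* data,
                                  int count) override {
    for (int i = 0; i < count; ++i) {
      printf("interval %u: count=%u size=%u\n", data[i].index, data[i].count,
             data[i].size);
    }
    return kContinue;
  }
};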

#define CODE_EVENTS_LIST(V) \
  V(Builtin)                \
  V(Callback)               \
  V(Eval)                   \
  V(Function)               \
  V(InterpretedFunction)    \
  V(Handler)                \
  V(BytecodeHandler)        \
  V(LazyCompile)            \
  V(RegExp)                 \
  V(Script)                 \
  V(Stub)                   \
  V(Relocation)

/**
 * Note that this enum may be extended in the future. Please include a default
 * case if this enum is used in a switch statement.
 */
enum CodeEventType {
  kUnknownType = 0
#define V(Name) , k##Name##Type
      CODE_EVENTS_LIST(V)
#undef V
};

/**
 * Representation of a code creation event.
 */
class V8_EXPORT CodeEvent {
 public:
  uintptr_t GetCodeStartAddress();
  size_t GetCodeSize();
  Local<String> GetFunctionName();
  Local<String> GetScriptName();
  int GetScriptLine();
  int GetScriptColumn();
  /**
   * NOTE (mmarchini): We can't allocate objects in the heap when we collect
   * existing code, and both the code type and the comment are not stored in
   * the heap, so we return those as const char*.
   */
  CodeEventType GetCodeType();
  const char* GetComment();

  static const char* GetCodeEventTypeName(CodeEventType code_event_type);

  uintptr_t GetPreviousCodeStartAddress();
};

/**
 * Interface to listen to code creation and code relocation events.
 */
class V8_EXPORT CodeEventHandler {
 public:
  /**
   * Creates a new listener for the |isolate|. The isolate must be initialized.
   * The listener object must be disposed after use by calling the |Dispose|
   * method. Multiple listeners can be created for the same isolate.
   */
  explicit CodeEventHandler(Isolate* isolate);
  virtual ~CodeEventHandler();

  /**
   * Handle is called every time a code object is created or moved. Information
   * about each code event will be available through the `code_event`
   * parameter.
   *
   * When the CodeEventType is kRelocationType, the code for this CodeEvent has
   * moved from `GetPreviousCodeStartAddress()` to `GetCodeStartAddress()`.
   */
  virtual void Handle(CodeEvent* code_event) = 0;

  /**
   * Call `Enable()` to start listening to code creation and code relocation
   * events. These events will be handled by `Handle()`.
   */
  void Enable();

  /**
   * Call `Disable()` to stop listening to code creation and code relocation
   * events.
   */
  void Disable();

 private:
  CodeEventHandler();
  CodeEventHandler(const CodeEventHandler&);
  CodeEventHandler& operator=(const CodeEventHandler&);
  void* internal_listener_;
};
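
// A minimal usage sketch (not part of v8-profiler.h): a listener that prints
// every code creation/relocation event once Enable() is called. Assumes an
// initialized isolate and <cstdio>.
class PrintingCodeEventHandler final : public v8::CodeEventHandler {
 public:
  explicit PrintingCodeEventHandler(v8::Isolate* isolate)
      : v8::CodeEventHandler(isolate) {}
  void Handle(v8::CodeEvent* code_event) override {
    printf("code event at %p, %zu bytes\n",
           reinterpret_cast<void*>(code_event->GetCodeStartAddress()),
           code_event->GetCodeSize());
  }
};
// Typical use: construct with the isolate, call Enable() while events are
// needed, and Disable() before destroying the handler.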

}  // namespace v8


#endif  // V8_V8_PROFILER_H_