v8  9.4.146 (node 16.15.0)
V8 is Google's open source JavaScript engine
v8-profiler.h
1 // Copyright 2010 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #ifndef V8_V8_PROFILER_H_
6 #define V8_V8_PROFILER_H_
7 
8 #include <limits.h>
9 
10 #include <memory>
11 #include <unordered_set>
12 #include <vector>
13 
14 #include "v8.h" // NOLINT(build/include_directory)
15 
16 /**
17  * Profiler support for the V8 JavaScript engine.
18  */
19 namespace v8 {
20 
21 class HeapGraphNode;
22 struct HeapStatsUpdate;
23 
24 using NativeObject = void*;
25 using SnapshotObjectId = uint32_t;
26 
27 struct CpuProfileDeoptFrame {
28  int script_id;
29  size_t position;
30 };
31 
32 namespace internal {
33 class CpuProfile;
34 } // namespace internal
35 
36 } // namespace v8
37 
38 #ifdef V8_OS_WIN
39 template class V8_EXPORT std::vector<v8::CpuProfileDeoptFrame>;
40 #endif
41 
42 namespace v8 {
43 
44 struct V8_EXPORT CpuProfileDeoptInfo {
45  /** A pointer to a static string owned by v8. */
46  const char* deopt_reason;
47  std::vector<CpuProfileDeoptFrame> stack;
48 };
49 
50 } // namespace v8
51 
52 #ifdef V8_OS_WIN
53 template class V8_EXPORT std::vector<v8::CpuProfileDeoptInfo>;
54 #endif
55 
56 namespace v8 {
57 
58 /**
59  * CpuProfileNode represents a node in a call graph.
60  */
61 class V8_EXPORT CpuProfileNode {
62  public:
63  struct LineTick {
64  /** The 1-based number of the source line where the function originates. */
65  int line;
66 
67  /** The count of samples associated with the source line. */
68  unsigned int hit_count;
69  };
70 
71  // An annotation hinting at the source of a CpuProfileNode.
72  enum SourceType {
73  // User-supplied script with associated resource information.
74  kScript = 0,
75  // Native scripts and provided builtins.
76  kBuiltin = 1,
77  // Callbacks into native code.
78  kCallback = 2,
79  // VM-internal functions or state.
80  kInternal = 3,
81  // A node that failed to symbolize.
82  kUnresolved = 4,
83  };
84 
85  /** Returns function name (empty string for anonymous functions.) */
86  Local<String> GetFunctionName() const;
87 
88  /**
89  * Returns function name (empty string for anonymous functions.)
90  * The string ownership is *not* passed to the caller. It stays valid until
91  * profile is deleted. The function is thread safe.
92  */
93  const char* GetFunctionNameStr() const;
94 
95  /** Returns id of the script where function is located. */
96  int GetScriptId() const;
97 
98  /** Returns resource name for script from where the function originates. */
99  Local<String> GetScriptResourceName() const;
100 
101  /**
102  * Returns resource name for script from where the function originates.
103  * The string ownership is *not* passed to the caller. It stays valid until
104  * profile is deleted. The function is thread safe.
105  */
106  const char* GetScriptResourceNameStr() const;
107 
108  /**
109  * Return true if the script from where the function originates is flagged as
110  * being shared cross-origin.
111  */
112  bool IsScriptSharedCrossOrigin() const;
113 
114  /**
115  * Returns the number, 1-based, of the line where the function originates.
116  * kNoLineNumberInfo if no line number information is available.
117  */
118  int GetLineNumber() const;
119 
120  /**
121  * Returns 1-based number of the column where the function originates.
122  * kNoColumnNumberInfo if no column number information is available.
123  */
124  int GetColumnNumber() const;
125 
126  /**
127  * Returns the number of the function's source lines that collect the samples.
128  */
129  unsigned int GetHitLineCount() const;
130 
131  /** Returns the set of source lines that collect the samples.
132  * The caller allocates the buffer and is responsible for releasing it.
133  * Returns true if all available entries are copied, otherwise false.
134  * The function copies nothing if the buffer is not large enough.
135  */
136  bool GetLineTicks(LineTick* entries, unsigned int length) const;
137 
138  /** Returns bailout reason for the function
139  * if the optimization was disabled for it.
140  */
141  const char* GetBailoutReason() const;
142 
143  /**
144  * Returns the count of samples where the function was currently executing.
145  */
146  unsigned GetHitCount() const;
147 
148  /** Returns id of the node. The id is unique within the tree */
149  unsigned GetNodeId() const;
150 
151  /**
152  * Gets the type of the source which the node was captured from.
153  */
154  SourceType GetSourceType() const;
155 
156  /** Returns child nodes count of the node. */
157  int GetChildrenCount() const;
158 
159  /** Retrieves a child node by index. */
160  const CpuProfileNode* GetChild(int index) const;
161 
162  /** Retrieves the ancestor node, or null if the root. */
163  const CpuProfileNode* GetParent() const;
164 
165  /** Retrieves deopt infos for the node. */
166  const std::vector<CpuProfileDeoptInfo>& GetDeoptInfos() const;
167 
168  static const int kNoLineNumberInfo = Message::kNoLineNumberInfo;
169  static const int kNoColumnNumberInfo = Message::kNoColumnInfo;
170 };
171 
172 
173 /**
174  * CpuProfile contains a CPU profile in a form of top-down call tree
175  * (from main() down to functions that do all the work).
176  */
177 class V8_EXPORT CpuProfile {
178  public:
179  /** Returns CPU profile title. */
180  Local<String> GetTitle() const;
181 
182  /** Returns the root node of the top down call tree. */
183  const CpuProfileNode* GetTopDownRoot() const;
184 
185  /**
186  * Returns number of samples recorded. The samples are not recorded unless
187  * |record_samples| parameter of CpuProfiler::StartProfiling is true.
188  */
189  int GetSamplesCount() const;
190 
191  /**
192  * Returns profile node corresponding to the top frame of the sample at
193  * the given index.
194  */
195  const CpuProfileNode* GetSample(int index) const;
196 
197  /**
198  * Returns the timestamp of the sample. The timestamp is the number of
199  * microseconds since some unspecified starting point.
200  * The point is equal to the starting point used by GetStartTime.
201  */
202  int64_t GetSampleTimestamp(int index) const;
203 
204  /**
205  * Returns time when the profile recording was started (in microseconds)
206  * since some unspecified starting point.
207  */
208  int64_t GetStartTime() const;
209 
210  /**
211  * Returns time when the profile recording was stopped (in microseconds)
212  * since some unspecified starting point.
213  * The point is equal to the starting point used by GetStartTime.
214  */
215  int64_t GetEndTime() const;
216 
217  /**
218  * Deletes the profile and removes it from CpuProfiler's list.
219  * All pointers to nodes previously returned become invalid.
220  */
221  void Delete();
222 };
223 
224 enum CpuProfilingMode {
225  // In the resulting CpuProfile tree, intermediate nodes in a stack trace
226  // (from the root to a leaf) will have line numbers that point to the start
227  // line of the function, rather than the line of the callsite of the child.
228  kLeafNodeLineNumbers,
229  // In the resulting CpuProfile tree, nodes are separated based on the line
230  // number of their callsite in their parent.
231  kCallerLineNumbers,
232 };
233 
234 // Determines how names are derived for functions sampled.
235 enum CpuProfilingNamingMode {
236  // Use the immediate name of functions at compilation time.
237  kStandardNaming,
238  // Use more verbose naming for functions without names, inferred from scope
239  // where possible.
240  kDebugNaming,
241 };
242 
243 enum CpuProfilingLoggingMode {
244  // Enables logging when a profile is active, and disables logging when all
245  // profiles are detached.
246  kLazyLogging,
247  // Enables logging for the lifetime of the CpuProfiler. Calls to
248  // StartRecording are faster, at the expense of runtime overhead.
249  kEagerLogging,
250 };
251 
252 // Enum for returning profiling status. Once StartProfiling is called,
253 // we want to return to clients whether the profiling was able to start
254 // correctly, or return a descriptive error.
255 enum class CpuProfilingStatus {
256  kStarted,
257  kAlreadyStarted,
258  kErrorTooManyProfilers
259 };
260 
261 /**
262  * Delegate for when max samples reached and samples are discarded.
263  */
264 class V8_EXPORT DiscardedSamplesDelegate {
265  public:
266  DiscardedSamplesDelegate() {}
267 
268  virtual ~DiscardedSamplesDelegate() = default;
269  virtual void Notify() = 0;
270 };
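// ---------------------------------------------------------------------------
// Usage sketch (editorial addition, not part of v8-profiler.h): a delegate
// that simply remembers that the configured sample limit was reached. The
// class name is illustrative; pass an instance to CpuProfiler::StartProfiling.
class SampleLimitFlagDelegate : public v8::DiscardedSamplesDelegate {
 public:
  void Notify() override { hit_sample_limit_ = true; }
  bool hit_sample_limit() const { return hit_sample_limit_; }

 private:
  bool hit_sample_limit_ = false;
};
// ---------------------------------------------------------------------------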
271 
272 /**
273  * Optional profiling attributes.
274  */
275 class V8_EXPORT CpuProfilingOptions {
276  public:
277  // Indicates that the sample buffer size should not be explicitly limited.
278  static const unsigned kNoSampleLimit = UINT_MAX;
279 
280  /**
281  * \param mode Type of computation of stack frame line numbers.
282  * \param max_samples The maximum number of samples that should be recorded by
283  * the profiler. Samples obtained after this limit will be
284  * discarded.
285  * \param sampling_interval_us controls the profile-specific target
286  * sampling interval. The provided sampling
287  * interval will be snapped to the next lowest
288  * non-zero multiple of the profiler's sampling
289  * interval, set via SetSamplingInterval(). If
290  * zero, the sampling interval will be equal to
291  * the profiler's sampling interval.
292  * \param filter_context If specified, profiles will only contain frames
293  * using this context. Other frames will be elided.
294  */
295  CpuProfilingOptions(
296  CpuProfilingMode mode = kLeafNodeLineNumbers,
297  unsigned max_samples = kNoSampleLimit, int sampling_interval_us = 0,
298  MaybeLocal<Context> filter_context = MaybeLocal<Context>());
299 
300  CpuProfilingMode mode() const { return mode_; }
301  unsigned max_samples() const { return max_samples_; }
302  int sampling_interval_us() const { return sampling_interval_us_; }
303 
304  private:
305  friend class internal::CpuProfile;
306 
307  bool has_filter_context() const { return !filter_context_.IsEmpty(); }
308  void* raw_filter_context() const;
309 
310  CpuProfilingMode mode_;
311  unsigned max_samples_;
312  int sampling_interval_us_;
313  CopyablePersistentTraits<Context>::CopyablePersistent filter_context_;
314 };
315 
316 /**
317  * Interface for controlling CPU profiling. Instance of the
318  * profiler can be created using v8::CpuProfiler::New method.
319  */
320 class V8_EXPORT CpuProfiler {
321  public:
322  /**
323  * Creates a new CPU profiler for the |isolate|. The isolate must be
324  * initialized. The profiler object must be disposed after use by calling
325  * |Dispose| method.
326  */
327  static CpuProfiler* New(Isolate* isolate,
328  CpuProfilingNamingMode = kDebugNaming,
329  CpuProfilingLoggingMode = kLazyLogging);
330 
331  /**
332  * Synchronously collect current stack sample in all profilers attached to
333  * the |isolate|. The call does not affect number of ticks recorded for
334  * the current top node.
335  */
336  static void CollectSample(Isolate* isolate);
337 
338  /**
339  * Disposes the CPU profiler object.
340  */
341  void Dispose();
342 
343  /**
344  * Changes default CPU profiler sampling interval to the specified number
345  * of microseconds. Default interval is 1000us. This method must be called
346  * when there are no profiles being recorded.
347  */
348  void SetSamplingInterval(int us);
349 
350  /**
351  * Sets whether or not the profiler should prioritize consistency of sample
352  * periodicity on Windows. Disabling this can greatly reduce CPU usage, but
353  * may result in greater variance in sample timings from the platform's
354  * scheduler. Defaults to enabled. This method must be called when there are
355  * no profiles being recorded.
356  */
357  void SetUsePreciseSampling(bool);
358 
359  /**
360  * Starts collecting a CPU profile. Title may be an empty string. Several
361  * profiles may be collected at once. Attempts to start collecting several
362  * profiles with the same title are silently ignored.
363  */
364  CpuProfilingStatus StartProfiling(
365  Local<String> title, CpuProfilingOptions options,
366  std::unique_ptr<DiscardedSamplesDelegate> delegate = nullptr);
367 
368  /**
369  * Starts profiling with the same semantics as above, except with expanded
370  * parameters.
371  *
372  * |record_samples| parameter controls whether individual samples should
373  * be recorded in addition to the aggregated tree.
374  *
375  * |max_samples| controls the maximum number of samples that should be
376  * recorded by the profiler. Samples obtained after this limit will be
377  * discarded.
378  */
379  CpuProfilingStatus StartProfiling(
380  Local<String> title, CpuProfilingMode mode, bool record_samples = false,
381  unsigned max_samples = CpuProfilingOptions::kNoSampleLimit);
382  /**
383  * The same as StartProfiling above, but the CpuProfilingMode defaults to
384  * kLeafNodeLineNumbers mode, which was the previous default behavior of the
385  * profiler.
386  */
387  CpuProfilingStatus StartProfiling(Local<String> title,
388  bool record_samples = false);
389 
390  /**
391  * Stops collecting CPU profile with a given title and returns it.
392  * If the title given is empty, finishes the last profile started.
393  */
394  CpuProfile* StopProfiling(Local<String> title);
395 
396  /**
397  * Generates more detailed source positions for code objects. This yields
398  * better results when mapping profiling samples to script source.
399  */
400  static void UseDetailedSourcePositionsForProfiling(Isolate* isolate);
401 
402  private:
403  CpuProfiler();
404  ~CpuProfiler();
405  CpuProfiler(const CpuProfiler&);
406  CpuProfiler& operator=(const CpuProfiler&);
407 };
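// ---------------------------------------------------------------------------
// Usage sketch (editorial addition, not part of v8-profiler.h): a minimal CPU
// profiling session. Assumes an initialized Isolate with an entered Context;
// the profiled JavaScript workload is elided and error handling is omitted.
static void RunCpuProfilingSession(v8::Isolate* isolate) {
  v8::CpuProfiler* profiler = v8::CpuProfiler::New(isolate);
  profiler->SetSamplingInterval(500);  // 500us; set before starting profiles
  v8::Local<v8::String> title =
      v8::String::NewFromUtf8Literal(isolate, "example");
  profiler->StartProfiling(title, /*record_samples=*/true);
  // ... run the JavaScript workload to be measured ...
  v8::CpuProfile* profile = profiler->StopProfiling(title);
  if (profile != nullptr) {
    // Inspect profile->GetTopDownRoot(), GetSamplesCount(), etc.
    profile->Delete();
  }
  profiler->Dispose();
}
// ---------------------------------------------------------------------------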
408 
409 /**
410  * HeapSnapshotEdge represents a directed connection between heap
411  * HeapGraphEdge represents a directed connection between heap
412  */
413 class V8_EXPORT HeapGraphEdge {
414  public:
415  enum Type {
416  kContextVariable = 0, // A variable from a function context.
417  kElement = 1, // An element of an array.
418  kProperty = 2, // A named object property.
419  kInternal = 3, // A link that can't be accessed from JS,
420  // thus, its name isn't a real property name
421  // (e.g. parts of a ConsString).
422  kHidden = 4, // A link that is needed for proper sizes
423  // calculation, but may be hidden from user.
424  kShortcut = 5, // A link that must not be followed during
425  // sizes calculation.
426  kWeak = 6 // A weak reference (ignored by the GC).
427  };
428 
429  /** Returns edge type (see HeapGraphEdge::Type). */
430  Type GetType() const;
431 
432  /**
433  * Returns edge name. This can be a variable name, an element index, or
434  * a property name.
435  */
436  Local<Value> GetName() const;
437 
438  /** Returns origin node. */
439  const HeapGraphNode* GetFromNode() const;
440 
441  /** Returns destination node. */
442  const HeapGraphNode* GetToNode() const;
443 };
444 
445 
446 /**
447  * HeapGraphNode represents a node in a heap graph.
448  */
449 class V8_EXPORT HeapGraphNode {
450  public:
451  enum Type {
452  kHidden = 0, // Hidden node, may be filtered when shown to user.
453  kArray = 1, // An array of elements.
454  kString = 2, // A string.
455  kObject = 3, // A JS object (except for arrays and strings).
456  kCode = 4, // Compiled code.
457  kClosure = 5, // Function closure.
458  kRegExp = 6, // RegExp.
459  kHeapNumber = 7, // Number stored in the heap.
460  kNative = 8, // Native object (not from V8 heap).
461  kSynthetic = 9, // Synthetic object, usually used for grouping
462  // snapshot items together.
463  kConsString = 10, // Concatenated string. A pair of pointers to strings.
464  kSlicedString = 11, // Sliced string. A fragment of another string.
465  kSymbol = 12, // A Symbol (ES6).
466  kBigInt = 13 // BigInt.
467  };
468 
469  /** Returns node type (see HeapGraphNode::Type). */
470  Type GetType() const;
471 
472  /**
473  * Returns node name. Depending on node's type this can be the name
474  * of the constructor (for objects), the name of the function (for
475  * closures), string value, or an empty string (for compiled code).
476  */
477  Local<String> GetName() const;
478 
479  /**
480  * Returns node id. For the same heap object, the id remains the same
481  * across all snapshots.
482  */
483  SnapshotObjectId GetId() const;
484 
485  /** Returns node's own size, in bytes. */
486  size_t GetShallowSize() const;
487 
488  /** Returns child nodes count of the node. */
489  int GetChildrenCount() const;
490 
491  /** Retrieves a child by index. */
492  const HeapGraphEdge* GetChild(int index) const;
493 };
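// ---------------------------------------------------------------------------
// Usage sketch (editorial addition, not part of v8-profiler.h): summing the
// shallow sizes of all nodes reachable from a heap graph node. The heap graph
// may contain cycles, so visited node ids are tracked; typically called with
// snapshot->GetRoot() and an empty set.
static size_t SumReachableShallowSizes(
    const v8::HeapGraphNode* node,
    std::unordered_set<v8::SnapshotObjectId>* visited) {
  if (!visited->insert(node->GetId()).second) return 0;  // already seen
  size_t total = node->GetShallowSize();
  for (int i = 0; i < node->GetChildrenCount(); ++i) {
    total += SumReachableShallowSizes(node->GetChild(i)->GetToNode(), visited);
  }
  return total;
}
// ---------------------------------------------------------------------------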
494 
495 
496 /**
497  * An interface for exporting data from V8, using "push" model.
498  */
499 class V8_EXPORT OutputStream {
500  public:
501  enum WriteResult {
502  kContinue = 0,
503  kAbort = 1
504  };
505  virtual ~OutputStream() = default;
506  /** Notify about the end of stream. */
507  virtual void EndOfStream() = 0;
508  /** Get preferred output chunk size. Called only once. */
509  virtual int GetChunkSize() { return 1024; }
510  /**
511  * Writes the next chunk of snapshot data into the stream. Writing
512  * can be stopped by returning kAbort as function result. EndOfStream
513  * will not be called in case writing was aborted.
514  */
515  virtual WriteResult WriteAsciiChunk(char* data, int size) = 0;
516  /**
517  * Writes the next chunk of heap stats data into the stream. Writing
518  * can be stopped by returning kAbort as function result. EndOfStream
519  * will not be called in case writing was aborted.
520  */
521  virtual WriteResult WriteHeapStatsChunk(HeapStatsUpdate* data, int count) {
522  return kAbort;
523  }
524 };
525 
526 /**
527  * HeapSnapshots record the state of the JS heap at some moment.
528  */
529 class V8_EXPORT HeapSnapshot {
530  public:
531  enum SerializationFormat {
532  kJSON = 0 // See format description near 'Serialize' method.
533  };
534 
535  /** Returns the root node of the heap graph. */
536  const HeapGraphNode* GetRoot() const;
537 
538  /** Returns a node by its id. */
539  const HeapGraphNode* GetNodeById(SnapshotObjectId id) const;
540 
541  /** Returns total nodes count in the snapshot. */
542  int GetNodesCount() const;
543 
544  /** Returns a node by index. */
545  const HeapGraphNode* GetNode(int index) const;
546 
547  /** Returns a max seen JS object Id. */
548  SnapshotObjectId GetMaxSnapshotJSObjectId() const;
549 
550  /**
551  * Deletes the snapshot and removes it from HeapProfiler's list.
552  * All pointers to nodes, edges and paths previously returned become
553  * invalid.
554  */
555  void Delete();
556 
557  /**
558  * Prepare a serialized representation of the snapshot. The result
559  * is written into the stream provided in chunks of specified size.
560  * The total length of the serialized snapshot is unknown in
561  * advance, it can be roughly equal to JS heap size (that means,
562  * it can be really big - tens of megabytes).
563  *
564  * For the JSON format, heap contents are represented as an object
565  * with the following structure:
566  *
567  * {
568  * snapshot: {
569  * title: "...",
570  * uid: nnn,
571  * meta: { meta-info },
572  * node_count: nnn,
573  * edge_count: nnn
574  * },
575  * nodes: [nodes array],
576  * edges: [edges array],
577  * strings: [strings array]
578  * }
579  *
580  * Nodes reference strings, other nodes, and edges by their indexes
581  * in corresponding arrays.
582  */
583  void Serialize(OutputStream* stream,
584  SerializationFormat format = kJSON) const;
585 };
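// ---------------------------------------------------------------------------
// Usage sketch (editorial addition, not part of v8-profiler.h): serializing an
// existing HeapSnapshot to a JSON string. A local OutputStream implementation
// collects the chunks; <string> must be included by the caller.
static std::string SnapshotToJson(const v8::HeapSnapshot* snapshot) {
  class JsonStream : public v8::OutputStream {
   public:
    void EndOfStream() override {}
    WriteResult WriteAsciiChunk(char* data, int size) override {
      json_.append(data, static_cast<size_t>(size));
      return kContinue;
    }
    std::string json_;
  };
  JsonStream stream;
  snapshot->Serialize(&stream, v8::HeapSnapshot::kJSON);
  return stream.json_;
}
// ---------------------------------------------------------------------------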
586 
587 
588 /**
589  * An interface for reporting progress and controlling long-running
590  * activities.
591  */
592 class V8_EXPORT ActivityControl {
593  public:
594  enum ControlOption {
595  kContinue = 0,
596  kAbort = 1
597  };
598  virtual ~ActivityControl() = default;
599  /**
600  * Notify about current progress. The activity can be stopped by
601  * returning kAbort as the callback result.
602  */
603  virtual ControlOption ReportProgressValue(int done, int total) = 0;
604 };
605 
606 /**
607  * AllocationProfile is a sampled profile of allocations done by the program.
608  * This is structured as a call-graph.
609  */
610 class V8_EXPORT AllocationProfile {
611  public:
612  struct Allocation {
613  /**
614  * Size of the sampled allocation object.
615  */
616  size_t size;
617 
618  /**
619  * The number of objects of such size that were sampled.
620  */
621  unsigned int count;
622  };
623 
624  /**
625  * Represents a node in the call-graph.
626  */
627  struct Node {
628  /**
629  * Name of the function. May be empty for anonymous functions or if the
630  * script corresponding to this function has been unloaded.
631  */
632  Local<String> name;
633 
634  /**
635  * Name of the script containing the function. May be empty if the script
636  * name is not available, or if the script has been unloaded.
637  */
638  Local<String> script_name;
639 
640  /**
641  * id of the script where the function is located. May be equal to
642  * v8::UnboundScript::kNoScriptId in cases where the script doesn't exist.
643  */
644  int script_id;
645 
646  /**
647  * Start position of the function in the script.
648  */
649  int start_position;
650 
651  /**
652  * 1-indexed line number where the function starts. May be
653  * kNoLineNumberInfo if no line number information is available.
654  */
655  int line_number;
656 
657  /**
658  * 1-indexed column number where the function starts. May be
659  * kNoColumnNumberInfo if no line number information is available.
660  */
661  int column_number;
662 
663  /**
664  * Unique id of the node.
665  */
666  uint32_t node_id;
667 
668  /**
669  * List of callees called from this node for which we have sampled
670  * allocations. The lifetime of the children is scoped to the containing
671  * AllocationProfile.
672  */
673  std::vector<Node*> children;
674 
675  /**
676  * List of self allocations done by this node in the call-graph.
677  */
678  std::vector<Allocation> allocations;
679  };
680 
681  /**
682  * Represent a single sample recorded for an allocation.
683  */
684  struct Sample {
685  /**
686  * id of the node in the profile tree.
687  */
688  uint32_t node_id;
689 
690  /**
691  * Size of the sampled allocation object.
692  */
693  size_t size;
694 
695  /**
696  * The number of objects of such size that were sampled.
697  */
698  unsigned int count;
699 
700  /**
701  * Unique time-ordered id of the allocation sample. Can be used to track
702  * what samples were added or removed between two snapshots.
703  */
704  uint64_t sample_id;
705  };
706 
707  /**
708  * Returns the root node of the call-graph. The root node corresponds to an
709  * empty JS call-stack. The lifetime of the returned Node* is scoped to the
710  * containing AllocationProfile.
711  */
712  virtual Node* GetRootNode() = 0;
713  virtual const std::vector<Sample>& GetSamples() = 0;
714 
715  virtual ~AllocationProfile() = default;
716 
717  static const int kNoLineNumberInfo = Message::kNoLineNumberInfo;
718  static const int kNoColumnNumberInfo = Message::kNoColumnInfo;
719 };
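// ---------------------------------------------------------------------------
// Usage sketch (editorial addition, not part of v8-profiler.h): totaling the
// sampled allocation bytes across an AllocationProfile call graph. The node
// children form a tree, so no cycle tracking is needed; start from
// profile->GetRootNode().
static size_t TotalSampledBytes(const v8::AllocationProfile::Node* node) {
  size_t total = 0;
  for (const v8::AllocationProfile::Allocation& allocation :
       node->allocations) {
    total += allocation.size * allocation.count;
  }
  for (const v8::AllocationProfile::Node* child : node->children) {
    total += TotalSampledBytes(child);
  }
  return total;
}
// ---------------------------------------------------------------------------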
720 
721 /**
722  * An object graph consisting of embedder objects and V8 objects.
723  * Edges of the graph are strong references between the objects.
724  * The embedder can build this graph during heap snapshot generation
725  * to include the embedder objects in the heap snapshot.
726  * Usage:
727  * 1) Define derived class of EmbedderGraph::Node for embedder objects.
728  * 2) Set the build embedder graph callback on the heap profiler using
729  * HeapProfiler::AddBuildEmbedderGraphCallback.
730  * 3) In the callback use graph->AddEdge(node1, node2) to add an edge from
731  * node1 to node2.
732  * 4) To represent references from/to V8 object, construct V8 nodes using
733  * graph->V8Node(value).
734  */
735 class V8_EXPORT EmbedderGraph {
736  public:
737  class Node {
738  public:
739  /**
740  * Detachedness specifies whether an object is attached or detached from the
741  * main application state. While unknown in general, there may be objects
742  * that specifically know their state. V8 passes this information along in
743  * the snapshot. Users of the snapshot may use it to annotate the object
744  * graph.
745  */
746  enum class Detachedness : uint8_t {
747  kUnknown = 0,
748  kAttached = 1,
749  kDetached = 2,
750  };
751 
752  Node() = default;
753  virtual ~Node() = default;
754  virtual const char* Name() = 0;
755  virtual size_t SizeInBytes() = 0;
756  /**
757  * The corresponding V8 wrapper node if not null.
758  * During heap snapshot generation the embedder node and the V8 wrapper
759  * node will be merged into one node to simplify retaining paths.
760  */
761  virtual Node* WrapperNode() { return nullptr; }
762  virtual bool IsRootNode() { return false; }
763  /** Must return true for non-V8 nodes. */
764  virtual bool IsEmbedderNode() { return true; }
765  /**
766  * Optional name prefix. It is used in Chrome for tagging detached nodes.
767  */
768  virtual const char* NamePrefix() { return nullptr; }
769 
770  /**
771  * Returns the NativeObject that can be used for querying the
772  * |HeapSnapshot|.
773  */
774  virtual NativeObject GetNativeObject() { return nullptr; }
775 
776  /**
777  * Detachedness state of a given object. While unknown in general, there may
778  * be objects that specifically know their state. V8 passes this information
779  * along in the snapshot. Users of the snapshot may use it to annotate the
780  * object graph.
781  */
782  virtual Detachedness GetDetachedness() { return Detachedness::kUnknown; }
783 
784  Node(const Node&) = delete;
785  Node& operator=(const Node&) = delete;
786  };
787 
788  /**
789  * Returns a node corresponding to the given V8 value. Ownership is not
790  * transferred. The result pointer is valid while the graph is alive.
791  */
792  virtual Node* V8Node(const v8::Local<v8::Value>& value) = 0;
793 
794  /**
795  * Adds the given node to the graph and takes ownership of the node.
796  * Returns a raw pointer to the node that is valid while the graph is alive.
797  */
798  virtual Node* AddNode(std::unique_ptr<Node> node) = 0;
799 
800  /**
801  * Adds an edge that represents a strong reference from the given
802  * node |from| to the given node |to|. The nodes must be added to the graph
803  * before calling this function.
804  *
805  * If name is nullptr, the edge will have auto-increment indexes, otherwise
806  * it will be named accordingly.
807  */
808  virtual void AddEdge(Node* from, Node* to, const char* name = nullptr) = 0;
809 
810  virtual ~EmbedderGraph() = default;
811 };
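// ---------------------------------------------------------------------------
// Usage sketch (editorial addition, not part of v8-profiler.h): a minimal
// embedder node type and a helper that links a native object to the V8 wrapper
// that retains it. Names are illustrative; the helper would typically be
// called from a BuildEmbedderGraphCallback (see HeapProfiler below).
class ExampleEmbedderNode : public v8::EmbedderGraph::Node {
 public:
  ExampleEmbedderNode(const char* name, size_t size)
      : name_(name), size_(size) {}
  const char* Name() override { return name_; }
  size_t SizeInBytes() override { return size_; }

 private:
  const char* name_;  // must stay valid for the duration of snapshot generation
  size_t size_;
};

static void AddNativeObjectToGraph(v8::EmbedderGraph* graph,
                                   v8::Local<v8::Value> wrapper,
                                   const char* name, size_t size) {
  v8::EmbedderGraph::Node* native =
      graph->AddNode(std::make_unique<ExampleEmbedderNode>(name, size));
  // Strong edge from the V8 wrapper object to the native object it retains.
  graph->AddEdge(graph->V8Node(wrapper), native);
}
// ---------------------------------------------------------------------------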
812 
813 /**
814  * Interface for controlling heap profiling. Instance of the
815  * profiler can be retrieved using v8::Isolate::GetHeapProfiler.
816  */
817 class V8_EXPORT HeapProfiler {
818  public:
819  enum SamplingFlags {
820  kSamplingNoFlags = 0,
821  kSamplingForceGC = 1 << 0,
822  };
823 
824  /**
825  * Callback function invoked during heap snapshot generation to retrieve
826  * the embedder object graph. The callback should use graph->AddEdge(..) to
827  * add references between the objects.
828  * The callback must not trigger garbage collection in V8.
829  */
830  typedef void (*BuildEmbedderGraphCallback)(v8::Isolate* isolate,
831  v8::EmbedderGraph* graph,
832  void* data);
833 
834  /**
835  * Callback function invoked during heap snapshot generation to retrieve
836  * the detachedness state of an object referenced by a TracedReference.
837  *
838  * The callback takes Local<Value> as parameter to allow the embedder to
839  * unpack the TracedReference into a Local and reuse that Local for different
840  * purposes.
841  */
842  using GetDetachednessCallback = EmbedderGraph::Node::Detachedness (*)(
843  v8::Isolate* isolate, const v8::Local<v8::Value>& v8_value,
844  uint16_t class_id, void* data);
845 
846  /** Returns the number of snapshots taken. */
847  int GetSnapshotCount();
848 
849  /** Returns a snapshot by index. */
850  const HeapSnapshot* GetHeapSnapshot(int index);
851 
852  /**
853  * Returns SnapshotObjectId for a heap object referenced by |value| if
854  * it has been seen by the heap profiler, kUnknownObjectId otherwise.
855  */
856  SnapshotObjectId GetObjectId(Local<Value> value);
857 
858  /**
859  * Returns SnapshotObjectId for a native object referenced by |value| if it
860  * has been seen by the heap profiler, kUnknownObjectId otherwise.
861  */
862  SnapshotObjectId GetObjectId(NativeObject value);
863 
864  /**
865  * Returns heap object with given SnapshotObjectId if the object is alive,
866  * otherwise empty handle is returned.
867  */
868  Local<Value> FindObjectById(SnapshotObjectId id);
869 
870  /**
871  * Clears internal map from SnapshotObjectId to heap object. The new objects
872  * will not be added into it unless a heap snapshot is taken or heap object
873  * tracking is kicked off.
874  */
875  void ClearObjectIds();
876 
877  /**
878  * A constant for invalid SnapshotObjectId. GetSnapshotObjectId will return
879  * it in case heap profiler cannot find id for the object passed as
880  * parameter. HeapSnapshot::GetNodeById will always return NULL for such id.
881  */
882  static const SnapshotObjectId kUnknownObjectId = 0;
883 
884  /**
885  * Callback interface for retrieving user friendly names of global objects.
886  */
887  class ObjectNameResolver {
888  public:
889  /**
890  * Returns name to be used in the heap snapshot for given node. Returned
891  * string must stay alive until snapshot collection is completed.
892  */
893  virtual const char* GetName(Local<Object> object) = 0;
894 
895  protected:
896  virtual ~ObjectNameResolver() = default;
897  };
898 
899  /**
900  * Takes a heap snapshot and returns it.
901  */
902  const HeapSnapshot* TakeHeapSnapshot(
903  ActivityControl* control = nullptr,
904  ObjectNameResolver* global_object_name_resolver = nullptr,
905  bool treat_global_objects_as_roots = true,
906  bool capture_numeric_value = false);
907 
908  /**
909  * Starts tracking of heap objects population statistics. After calling
910  * this method, all heap objects relocations done by the garbage collector
911  * are being registered.
912  *
913  * |track_allocations| parameter controls whether stack trace of each
914  * allocation in the heap will be recorded and reported as part of
915  * HeapSnapshot.
916  */
917  void StartTrackingHeapObjects(bool track_allocations = false);
918 
919  /**
920  * Adds a new time interval entry to the aggregated statistics array. The
921  * time interval entry contains information on the current heap objects
922  * population size. The method also updates aggregated statistics and
923  * reports updates for all previous time intervals via the OutputStream
924  * object. Updates on each time interval are provided as a stream of the
925  * HeapStatsUpdate structure instances.
926  * If |timestamp_us| is supplied, timestamp of the new entry will be written
927  * into it. The return value of the function is the last seen heap object Id.
928  *
929  * StartTrackingHeapObjects must be called before the first call to this
930  * method.
931  */
932  SnapshotObjectId GetHeapStats(OutputStream* stream,
933  int64_t* timestamp_us = nullptr);
934 
935  /**
936  * Stops tracking of heap objects population statistics, cleans up all
937  * collected data. StartTrackingHeapObjects must be called again prior to
938  * calling GetHeapStats next time.
939  */
940  void StopTrackingHeapObjects();
941 
942  /**
943  * Starts gathering a sampling heap profile. A sampling heap profile is
944  * similar to tcmalloc's heap profiler and Go's mprof. It samples object
945  * allocations and builds an online 'sampling' heap profile. At any point in
946  * time, this profile is expected to be a representative sample of objects
947  * currently live in the system. Each sampled allocation includes the stack
948  * trace at the time of allocation, which makes this really useful for memory
949  * leak detection.
950  *
951  * This mechanism is intended to be cheap enough that it can be used in
952  * production with minimal performance overhead.
953  *
954  * Allocations are sampled using a randomized Poisson process. On average, one
955  * allocation will be sampled every |sample_interval| bytes allocated. The
956  * |stack_depth| parameter controls the maximum number of stack frames to be
957  * captured on each allocation.
958  *
959  * NOTE: This is a proof-of-concept at this point. Right now we only sample
960  * newspace allocations. Support for paged space allocation (e.g. pre-tenured
961  * objects, large objects, code objects, etc.) and native allocations
962  * doesn't exist yet, but is anticipated in the future.
963  *
964  * Objects allocated before the sampling is started will not be included in
965  * the profile.
966  *
967  * Returns false if a sampling heap profiler is already running.
968  */
969  bool StartSamplingHeapProfiler(uint64_t sample_interval = 512 * 1024,
970  int stack_depth = 16,
971  SamplingFlags flags = kSamplingNoFlags);
972 
973  /**
974  * Stops the sampling heap profile and discards the current profile.
975  */
976  void StopSamplingHeapProfiler();
977 
978  /**
979  * Returns the sampled profile of allocations allocated (and still live) since
980  * StartSamplingHeapProfiler was called. The ownership of the pointer is
981  * transferred to the caller. Returns nullptr if sampling heap profiler is not
982  * active.
983  */
984  AllocationProfile* GetAllocationProfile();
985 
986  /**
987  * Deletes all snapshots taken. All previously returned pointers to
988  * snapshots and their contents become invalid after this call.
989  */
990  void DeleteAllHeapSnapshots();
991 
992  void AddBuildEmbedderGraphCallback(BuildEmbedderGraphCallback callback,
993  void* data);
994  void RemoveBuildEmbedderGraphCallback(BuildEmbedderGraphCallback callback,
995  void* data);
996 
997  void SetGetDetachednessCallback(GetDetachednessCallback callback, void* data);
998 
999  /**
1000  * Default value of persistent handle class ID. Must not be used to
1001  * define a class. Can be used to reset a class of a persistent
1002  * handle.
1003  */
1004  static const uint16_t kPersistentHandleNoClassId = 0;
1005 
1006  private:
1007  HeapProfiler();
1008  ~HeapProfiler();
1009  HeapProfiler(const HeapProfiler&);
1010  HeapProfiler& operator=(const HeapProfiler&);
1011 };
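// ---------------------------------------------------------------------------
// Usage sketch (editorial addition, not part of v8-profiler.h): taking a heap
// snapshot and running a short sampling heap profile. Assumes an initialized
// Isolate; error handling and the allocating workload are elided.
static void InspectHeap(v8::Isolate* isolate) {
  v8::HeapProfiler* profiler = isolate->GetHeapProfiler();

  // One-off snapshot of the current heap state.
  const v8::HeapSnapshot* snapshot = profiler->TakeHeapSnapshot();
  // ... e.g. serialize it with HeapSnapshot::Serialize ...
  profiler->DeleteAllHeapSnapshots();  // invalidates |snapshot|
  (void)snapshot;

  // Sampling profile of allocations made from this point on.
  profiler->StartSamplingHeapProfiler(/*sample_interval=*/64 * 1024,
                                      /*stack_depth=*/16);
  // ... run the workload whose allocations should be sampled ...
  std::unique_ptr<v8::AllocationProfile> allocation_profile(
      profiler->GetAllocationProfile());
  profiler->StopSamplingHeapProfiler();
}
// ---------------------------------------------------------------------------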
1012 
1013 /**
1014  * A struct for exporting HeapStats data from V8, using "push" model.
1015  * See HeapProfiler::GetHeapStats.
1016  */
1017 struct HeapStatsUpdate {
1018  HeapStatsUpdate(uint32_t index, uint32_t count, uint32_t size)
1019  : index(index), count(count), size(size) { }
1020  uint32_t index; // Index of the time interval that was changed.
1021  uint32_t count; // New value of count field for the interval with this index.
1022  uint32_t size; // New value of size field for the interval with this index.
1023 };
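// ---------------------------------------------------------------------------
// Usage sketch (editorial addition, not part of v8-profiler.h): tracking heap
// object statistics over time. A local OutputStream receives HeapStatsUpdate
// records for every time interval that changed since the last GetHeapStats
// call; here it only counts how many updates were pushed.
static void SampleHeapStats(v8::Isolate* isolate) {
  class StatsStream : public v8::OutputStream {
   public:
    void EndOfStream() override {}
    WriteResult WriteAsciiChunk(char*, int) override { return kAbort; }
    WriteResult WriteHeapStatsChunk(v8::HeapStatsUpdate* data,
                                    int count) override {
      (void)data;
      updates_seen_ += count;
      return kContinue;
    }
    int updates_seen_ = 0;
  };

  v8::HeapProfiler* profiler = isolate->GetHeapProfiler();
  profiler->StartTrackingHeapObjects(/*track_allocations=*/false);
  // ... let the program run for a while ...
  StatsStream stream;
  profiler->GetHeapStats(&stream);  // pushes HeapStatsUpdate records
  profiler->StopTrackingHeapObjects();
}
// ---------------------------------------------------------------------------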
1024 
1025 #define CODE_EVENTS_LIST(V) \
1026  V(Builtin)                \
1027  V(Callback)               \
1028  V(Eval)                   \
1029  V(Function)               \
1030  V(InterpretedFunction)    \
1031  V(Handler)                \
1032  V(BytecodeHandler)        \
1033  V(LazyCompile)            \
1034  V(RegExp)                 \
1035  V(Script)                 \
1036  V(Stub)                   \
1037  V(Relocation)
1038 
1039 /**
1040  * Note that this enum may be extended in the future. Please include a default
1041  * case if this enum is used in a switch statement.
1042  */
1043 enum CodeEventType {
1044  kUnknownType = 0
1045 #define V(Name) , k##Name##Type
1046  CODE_EVENTS_LIST(V)
1047 #undef V
1048 };
1049 
1050 /**
1051  * Representation of a code creation event
1052  */
1053 class V8_EXPORT CodeEvent {
1054  public:
1055  uintptr_t GetCodeStartAddress();
1056  size_t GetCodeSize();
1057  Local<String> GetFunctionName();
1058  Local<String> GetScriptName();
1059  int GetScriptLine();
1060  int GetScriptColumn();
1061  /**
1062  * NOTE (mmarchini): We can't allocate objects in the heap when we collect
1063  * existing code, and both the code type and the comment are not stored in the
1064  * heap, so we return those as const char*.
1065  */
1066  CodeEventType GetCodeType();
1067  const char* GetComment();
1068 
1069  static const char* GetCodeEventTypeName(CodeEventType code_event_type);
1070 
1071  uintptr_t GetPreviousCodeStartAddress();
1072 };
1073 
1074 /**
1075  * Interface to listen to code creation and code relocation events.
1076  */
1077 class V8_EXPORT CodeEventHandler {
1078  public:
1079  /**
1080  * Creates a new listener for the |isolate|. The isolate must be initialized.
1081  * The listener object must be disposed after use by calling |Dispose| method.
1082  * Multiple listeners can be created for the same isolate.
1083  */
1084  explicit CodeEventHandler(Isolate* isolate);
1085  virtual ~CodeEventHandler();
1086 
1087  /**
1088  * Handle is called every time a code object is created or moved. Information
1089  * about each code event will be available through the `code_event`
1090  * parameter.
1091  *
1092  * When the CodeEventType is kRelocationType, the code for this CodeEvent has
1093  * moved from `GetPreviousCodeStartAddress()` to `GetCodeStartAddress()`.
1094  */
1095  virtual void Handle(CodeEvent* code_event) = 0;
1096 
1097  /**
1098  * Call `Enable()` to start listening to code creation and code relocation
1099  * events. These events will be handled by `Handle()`.
1100  */
1101  void Enable();
1102 
1103  /**
1104  * Call `Disable()` to stop listening to code creation and code relocation
1105  * events.
1106  */
1107  void Disable();
1108 
1109  private:
1110  CodeEventHandler();
1111  CodeEventHandler(const CodeEventHandler&);
1112  CodeEventHandler& operator=(const CodeEventHandler&);
1113  void* internal_listener_;
1114 };
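// ---------------------------------------------------------------------------
// Usage sketch (editorial addition, not part of v8-profiler.h): a handler that
// prints every code creation or relocation event. The class name is
// illustrative and <cstdio> must be included by the caller. Construct it with
// an initialized Isolate, call Enable(), and call Disable() when done.
class PrintingCodeEventHandler : public v8::CodeEventHandler {
 public:
  explicit PrintingCodeEventHandler(v8::Isolate* isolate)
      : v8::CodeEventHandler(isolate) {}

  void Handle(v8::CodeEvent* code_event) override {
    std::printf("%s code at %p, %zu bytes\n",
                v8::CodeEvent::GetCodeEventTypeName(code_event->GetCodeType()),
                reinterpret_cast<void*>(code_event->GetCodeStartAddress()),
                code_event->GetCodeSize());
  }
};
// ---------------------------------------------------------------------------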
1115 
1116 } // namespace v8
1117 
1118 
1119 #endif // V8_V8_PROFILER_H_