1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
3 * ***** BEGIN LICENSE BLOCK *****
4 * Version: MPL 1.1/GPL 2.0/LGPL 2.1
6 * The contents of this file are subject to the Mozilla Public License Version
7 * 1.1 (the "License"); you may not use this file except in compliance with
8 * the License. You may obtain a copy of the License at
9 * http://www.mozilla.org/MPL/
11 * Software distributed under the License is distributed on an "AS IS" basis,
12 * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
13 * for the specific language governing rights and limitations under the
16 * The Original Code is SpiderMonkey code.
18 * The Initial Developer of the Original Code is
19 * Mozilla Corporation.
20 * Portions created by the Initial Developer are Copyright (C) 2010
21 * the Initial Developer. All Rights Reserved.
26 * Alternatively, the contents of this file may be used under the terms of
27 * either of the GNU General Public License Version 2 or later (the "GPL"),
28 * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
29 * in which case the provisions of the GPL or the LGPL are applicable instead
30 * of those above. If you wish to allow use of your version of this file only
31 * under the terms of either the GPL or the LGPL, and not to allow others to
32 * use your version of this file under the terms of the MPL, indicate your
33 * decision by deleting the provisions above and replace them with the notice
34 * and other provisions required by the GPL or the LGPL. If you do not delete
35 * the provisions above, a recipient may use your version of this file under
36 * the terms of any one of the MPL, the GPL or the LGPL.
38 * ***** END LICENSE BLOCK ***** */
40 #ifndef jscompartment_h___
41 #define jscompartment_h___
48 #include "jsgcstats.h"
54 #pragma warning(disable:4251) /* Silence warning about JS_FRIEND_API and data members. */
59 class ExecutableAllocator;
65 /* Holds the number of recording attempts for an address (keyed by bytecode PC). */
66 typedef HashMap<jsbytecode*,
68 DefaultHasher<jsbytecode*>,
69 SystemAllocPolicy> RecordAttemptMap;
71 /* Holds the profile data for loops, keyed by the loop's bytecode PC. */
72 typedef HashMap<jsbytecode*,
74 DefaultHasher<jsbytecode*>,
75 SystemAllocPolicy> LoopProfileMap;
/* Set of scripts; used for TraceMonitor::tracedScripts (scripts with recorded fragments). */
79 typedef HashSet<JSScript *,
80 DefaultHasher<JSScript *>,
81 SystemAllocPolicy> TracedScriptSet;
/* Per-function cache keyed by JSFunction*; used for JSCompartment::toSourceCache. */
83 typedef HashMap<JSFunction *,
85 DefaultHasher<JSFunction *>,
86 SystemAllocPolicy> ToSourceCache;
90 /* Holds the execution state during trace execution. */
93 JSContext* cx; // current VM context handle
94 TraceMonitor* traceMonitor; // current TM
95 double* stackBase; // native stack base
96 double* sp; // native stack pointer, stack[0] is spbase[0]
97 double* eos; // first unusable word after the native stack / begin of globals
98 FrameInfo** callstackBase; // call stack base
99 void* sor; // start of rp stack
100 FrameInfo** rp; // call stack pointer
101 void* eor; // first unusable word after the call stack
102 VMSideExit* lastTreeExitGuard; // guard we exited on during a tree call
103 VMSideExit* lastTreeCallGuard; // guard we want to grow from if the tree
104 // call exit guard mismatched
105 void* rpAtLastTreeCall; // value of rp at innermost tree call guard
106 VMSideExit* outermostTreeExitGuard; // the last side exit returned by js_CallTree
107 TreeFragment* outermostTree; // the outermost tree we initially invoked
108 uintN* inlineCallCountp; // inline call count counter
109 VMSideExit** innermostNestedGuardp; // out-param slot for the innermost nested guard (bound via the constructor)
110 VMSideExit* innermost; // NOTE(review): presumably the innermost side exit taken -- confirm
114 // Used by _FAIL builtins; see jsbuiltins.h. The builtin sets the
115 // JSBUILTIN_BAILED bit if it bails off trace and the JSBUILTIN_ERROR bit
116 // if an error or exception occurred.
117 uint32 builtinStatus;
119 // Used to communicate the location of the return value in case of a deep bail.
122 // Used when calling natives from trace to root the vp vector.
// Constructor: binds the context, monitor, tree fragment, and the two
// out-params (inline call counter and innermost-nested-guard slot) above.
126 TracerState(JSContext *cx, TraceMonitor *tm, TreeFragment *ti,
127 uintN &inlineCallCountp, VMSideExit** innermostNestedGuardp);
132 * Storage for the execution state and store during trace execution. Generated
133 * code depends on the fact that the globals begin |MAX_NATIVE_STACK_SLOTS|
134 * doubles after the stack begins. Thus, on trace, |TracerState::eos| holds a
135 * pointer to the first global.
137 struct TraceNativeStorage
139 double stack_global_buf[MAX_NATIVE_STACK_SLOTS + GLOBAL_SLOTS_BUFFER_SIZE]; // native stack, then globals, contiguous
140 FrameInfo *callstack_buf[MAX_CALL_STACK_ENTRIES];
142 double *stack() { return stack_global_buf; } // base of the native stack
143 double *global() { return stack_global_buf + MAX_NATIVE_STACK_SLOTS; } // globals begin right after the stack slots
144 FrameInfo **callstack() { return callstack_buf; }
147 /* Holds data to track a single global. */
151 SlotList* globalSlots; // NOTE(review): presumably the tracked global's slot list -- confirm
155 * Trace monitor. Every JSCompartment has an associated trace monitor
156 * that keeps track of loop frequencies for all JavaScript code loaded
159 struct TraceMonitor {
161 * The context currently executing JIT-compiled code in this compartment, or
162 * NULL if none. Among other things, this can in certain cases prevent
163 * last-ditch GC and suppress calls to JS_ReportOutOfMemory.
165 * !tracecx && !recorder: not on trace
166 * !tracecx && recorder: recording
167 * tracecx && !recorder: executing a trace
168 * tracecx && recorder: executing inner loop, recording outer loop
173 * State for the current tree execution. bailExit is valid if the tree has
174 * called back into native code via a _FAIL builtin and has not yet bailed,
175 * else garbage (NULL in debug builds).
177 js::TracerState *tracerState;
178 js::VMSideExit *bailExit;
180 /* Counts the number of iterations run by the currently executing trace. */
181 unsigned iterationCounter;
184 * Cached storage to use when executing on trace. While we may enter nested
185 * traces, we always reuse the outer trace's storage, so never need more
188 TraceNativeStorage *storage;
191 * There are 4 allocators here. This might seem like overkill, but they
192 * have different lifecycles, and by keeping them separate we keep the
193 * amount of retained memory down significantly. They are flushed (ie.
194 * all the allocated memory is freed) periodically.
196 * - dataAlloc has the lifecycle of the monitor. It's flushed only when
197 * the monitor is flushed. It's used for fragments.
199 * - traceAlloc has the same flush lifecycle as the dataAlloc, but it is
200 * also *marked* when a recording starts and rewinds to the mark point
201 * if recording aborts. So you can put things in it that are only
202 * reachable on a successful record/compile cycle like GuardRecords and
205 * - tempAlloc is flushed after each recording, successful or not. It's
206 * used to store LIR code and for all other elements in the LIR
209 * - codeAlloc has the same lifetime as dataAlloc, but its API is
210 * different (CodeAlloc vs. VMAllocator). It's used for native code.
211 * It's also a good idea to keep code and data separate to avoid I-cache
212 * vs. D-cache issues.
214 VMAllocator* dataAlloc;
215 VMAllocator* traceAlloc;
216 VMAllocator* tempAlloc;
217 nanojit::CodeAlloc* codeAlloc;
218 nanojit::Assembler* assembler; // nanojit assembler instance
219 FrameInfoCache* frameCache;
221 /* This gets incremented every time the monitor is flushed. */
225 TraceRecorder* recorder; // active recorder, or NULL when not recording (see state table above)
227 /* If we are profiling a loop, this tracks the current profile. Otherwise NULL. */
228 LoopProfile* profile;
230 GlobalState globalStates[MONITOR_N_GLOBAL_STATES]; // per-global tracking state (see GlobalState)
231 TreeFragment *vmfragments[FRAGMENT_TABLE_SIZE]; // fragment table -- presumably hashed by PC; confirm
232 RecordAttemptMap* recordAttempts; // per-PC count of recording attempts
234 /* A hashtable mapping PC values to loop profiles for those loops. */
235 LoopProfileMap* loopProfiles;
238 * Maximum size of the code cache before we start flushing. 1/16 of this
239 * size is used as threshold for the regular expression code cache.
241 uint32 maxCodeCacheBytes;
244 * If nonzero, do not flush the JIT cache after a deep bail. That would
245 * free JITted code pages that we will later return to. Instead, set the
246 * needFlush flag so that it can be flushed later.
250 // Cached temporary typemap to avoid realloc'ing every time we create one.
251 // This must be used in only one place at a given time. It must be cleared
253 TypeMap* cachedTempTypeMap;
255 /* Scripts with recorded fragments. */
256 TracedScriptSet tracedScripts;
259 /* Fields needed for fragment/guard profiling. */
260 nanojit::Seq<nanojit::Fragment*>* branches;
263 * profAlloc has a lifetime which spans exactly from InitJIT to
266 VMAllocator* profAlloc;
267 FragStatsMap* profTab;
/* NOTE(review): whether a trace is executing -- presumably tests tracecx (cf. state table above); confirm */
270 bool ontrace() const {
274 /* Flush the JIT cache. */
277 /* Sweep any cache entry pointing to dead GC things. */
278 void sweep(JSContext *cx);
280 /* Mark any tracer stacks that are active. */
281 void mark(JSTracer *trc);
283 bool outOfMemory() const;
287 class JaegerCompartment;
291 /* Number of potentially reusable scriptsToGC to search for the eval cache. */
292 #ifndef JS_EVAL_CACHE_SHIFT
293 # define JS_EVAL_CACHE_SHIFT 6
295 #define JS_EVAL_CACHE_SIZE JS_BIT(JS_EVAL_CACHE_SHIFT)
/* X-macro list of eval-cache events to meter. */
298 # define EVAL_CACHE_METER_LIST(_) _(probe), _(hit), _(step), _(noscope)
299 # define identity(x) x
/* One uint64 counter per event named in EVAL_CACHE_METER_LIST. */
301 struct JSEvalCacheMeter {
302 uint64 EVAL_CACHE_METER_LIST(identity);
310 class NativeIterCache {
311 static const size_t SIZE = size_t(1) << 8; // 256 entries
313 /* Cached native iterators. */
314 JSObject *data[SIZE];
// Direct-mapped indexing: key reduced modulo SIZE, so colliding keys overwrite each other.
316 static size_t getIndex(uint32 key) {
317 return size_t(key) % SIZE;
321 /* Native iterator most recently started. */
// Fetch the entry cached for |key| (may be a collision victim's entry; caller must validate).
334 JSObject *get(uint32 key) const {
335 return data[getIndex(key)];
// Store |iterobj| in the slot for |key|, displacing any previous occupant.
338 void set(uint32 key, JSObject *iterobj) {
339 data[getIndex(key)] = iterobj;
344 * A single-entry cache for some base-10 double-to-string conversions. This
345 * helps date-format-xparb.js. It also avoids skewing the results for
346 * v8-splay.js when measured by the SunSpider harness, where the splay tree
347 * initialization (which includes many repeated double-to-string conversions)
348 * is erroneously included in the measurement; see bug 562553.
353 JSString *s; // if s==NULL, d and base are not valid
355 DtoaCache() : s(NULL) {}
// Invalidate the single cached entry (s==NULL marks it empty).
356 void purge() { s = NULL; }
// Return the cached string iff both base and value match the cached entry; otherwise NULL.
358 JSString *lookup(jsint base, double d) {
359 return this->s && base == this->base && d == this->d ? this->s : NULL;
// Record |s| as the conversion result for (base, d).
362 void cache(jsint base, double d, JSString *s) {
372 struct JS_FRIEND_API(JSCompartment) {
374 JSPrincipals *principals;
375 js::gc::Chunk *chunk;
377 js::gc::ArenaList arenas[js::gc::FINALIZE_LIMIT]; // per-finalize-kind arena lists
378 js::gc::FreeLists freeLists;
381 size_t gcTriggerBytes;
385 js::gc::JSGCArenaStats compartmentStats[js::gc::FINALIZE_LIMIT];
389 /* Trace-tree JIT recorder/interpreter state. */
390 js::TraceMonitor traceMonitor;
393 /* Hashed lists of scripts created by eval to garbage-collect. */
394 JSScript *scriptsToGC[JS_EVAL_CACHE_SIZE];
397 JSEvalCacheMeter evalCacheMeter;
401 bool active; // GC flag, whether there are active frames
402 js::WrapperMap crossCompartmentWrappers; // wrappers for things from other compartments; see markCrossCompartment/wrap
405 js::mjit::JaegerCompartment *jaegerCompartment; // method-JIT per-compartment state
409 * Shared scope property tree, and arena-pool for allocating its nodes.
411 js::PropertyTree propertyTree;
414 /* Property metering. */
415 jsrefcount livePropTreeNodes;
416 jsrefcount totalPropTreeNodes;
417 jsrefcount propTreeKidsChunks;
418 jsrefcount liveDictModeNodes;
422 * Runtime-shared empty scopes for well-known built-in objects that lack
423 * class prototypes (the usual locus of an emptyShape). Mnemonic: ABCDEW
425 js::EmptyShape *emptyArgumentsShape;
426 js::EmptyShape *emptyBlockShape;
427 js::EmptyShape *emptyCallShape;
428 js::EmptyShape *emptyDeclEnvShape;
429 js::EmptyShape *emptyEnumeratorShape;
430 js::EmptyShape *emptyWithShape;
432 typedef js::HashSet<js::EmptyShape *,
433 js::DefaultHasher<js::EmptyShape *>,
434 js::SystemAllocPolicy> EmptyShapeSet;
436 EmptyShapeSet emptyShapes;
438 bool debugMode; // true iff debug mode on
439 JSCList scripts; // scripts in this compartment
441 JSC::ExecutableAllocator *regExpAllocator; // NOTE(review): presumably executable memory for compiled regexps -- confirm
443 js::NativeIterCache nativeIterCache;
445 js::ToSourceCache toSourceCache;
447 JSCompartment(JSRuntime *rt);
452 /* Mark cross-compartment wrappers. */
453 void markCrossCompartment(JSTracer *trc);
455 /* Mark this compartment's local roots. */
456 void mark(JSTracer *trc);
/* wrap overloads: rewrap the given entity in place for use inside this compartment. */
458 bool wrap(JSContext *cx, js::Value *vp);
459 bool wrap(JSContext *cx, JSString **strp);
460 bool wrap(JSContext *cx, JSObject **objp);
461 bool wrapId(JSContext *cx, jsid *idp);
462 bool wrap(JSContext *cx, js::PropertyOp *op);
463 bool wrap(JSContext *cx, js::StrictPropertyOp *op);
464 bool wrap(JSContext *cx, js::PropertyDescriptor *desc);
465 bool wrap(JSContext *cx, js::AutoIdVector &props);
467 void sweep(JSContext *cx, uint32 releaseInterval);
468 void purge(JSContext *cx);
469 void finishArenaLists();
470 void finalizeObjectArenaLists(JSContext *cx);
471 void finalizeStringArenaLists(JSContext *cx);
472 bool arenaListsAreEmpty();
474 void setGCLastBytes(size_t lastBytes);
476 js::DtoaCache dtoaCache;
479 js::MathCache *mathCache; // lazily allocated; see getMathCache()
481 js::MathCache *allocMathCache(JSContext *cx);
485 typedef js::HashMap<jsbytecode*,
487 js::DefaultHasher<jsbytecode*>,
488 js::SystemAllocPolicy> BackEdgeMap;
490 BackEdgeMap backEdgeTable; // per-PC back-edge counts; see backEdgeCount/incBackEdgeCount
492 JSCompartment *thisForCtor() { return this; } // returns |this|, usable in the constructor's init list
/* Return the math cache, allocating it on first use. */
494 js::MathCache *getMathCache(JSContext *cx) {
495 return mathCache ? mathCache : allocMathCache(cx);
/* GC mark flag accessors. */
498 bool isMarked() { return marked; }
499 void clearMark() { marked = false; }
501 size_t backEdgeCount(jsbytecode *pc) const;
502 size_t incBackEdgeCount(jsbytecode *pc);
/* Shorthand accessors for per-compartment data reached through a context. */
505 #define JS_SCRIPTS_TO_GC(cx) ((cx)->compartment->scriptsToGC)
506 #define JS_PROPERTY_TREE(cx) ((cx)->compartment->propertyTree)
/* JS_COMPARTMENT_METER(x): evaluates x only when compartment metering is enabled. */
509 #define JS_COMPARTMENT_METER(x) x
511 #define JS_COMPARTMENT_METER(x)
515 * N.B. JS_ON_TRACE(cx) is true if JIT code is on the stack in the current
516 * thread, regardless of whether cx is the context in which that trace is
517 * executing. cx must be a context on the current thread.
520 JS_ON_TRACE(JSContext *cx)
523 if (JS_THREAD_DATA(cx)->onTraceCompartment)
524 return JS_THREAD_DATA(cx)->onTraceCompartment->traceMonitor.ontrace(); // delegate to that compartment's monitor
/* Trace monitor of the compartment whose trace is running; caller must be on trace (asserted). */
530 static inline js::TraceMonitor *
531 JS_TRACE_MONITOR_ON_TRACE(JSContext *cx)
533 JS_ASSERT(JS_ON_TRACE(cx));
534 return &JS_THREAD_DATA(cx)->onTraceCompartment->traceMonitor;
538 * Only call this directly from the interpreter loop or the method jit.
539 * Otherwise, we may get the wrong compartment, and thus the wrong
/* NOTE: the result is the monitor of cx->compartment, so it is only reliable where that field is current. */
542 static inline js::TraceMonitor *
543 JS_TRACE_MONITOR_FROM_CONTEXT(JSContext *cx)
545 return &cx->compartment->traceMonitor;
/* The active TraceRecorder on cx's thread, if some compartment is recording. */
549 static inline js::TraceRecorder *
550 TRACE_RECORDER(JSContext *cx)
553 if (JS_THREAD_DATA(cx)->recordingCompartment)
554 return JS_THREAD_DATA(cx)->recordingCompartment->traceMonitor.recorder;
/* The LoopProfile of the compartment currently profiling on cx's thread, if any. */
559 static inline js::LoopProfile *
560 TRACE_PROFILER(JSContext *cx)
563 if (JS_THREAD_DATA(cx)->profilingCompartment)
564 return JS_THREAD_DATA(cx)->profilingCompartment->traceMonitor.profile;
/* Compartment's MathCache, allocated on first use (see JSCompartment::getMathCache). */
570 static inline MathCache *
571 GetMathCache(JSContext *cx)
573 return cx->compartment->getMathCache(cx);
/* Bump an eval-cache counter when metering is compiled in; a no-op otherwise. */
578 # define EVAL_CACHE_METER(x) (cx->compartment->evalCacheMeter.x++)
580 # define EVAL_CACHE_METER(x) ((void) 0)
/* RAII guard: remembers cx->compartment on construction and restores it on destruction. */
589 class PreserveCompartment {
593 JSCompartment *oldCompartment; // compartment to restore
594 JS_DECL_USE_GUARD_OBJECT_NOTIFIER
596 PreserveCompartment(JSContext *cx JS_GUARD_OBJECT_NOTIFIER_PARAM) : cx(cx) {
597 JS_GUARD_OBJECT_NOTIFIER_INIT;
598 oldCompartment = cx->compartment;
601 ~PreserveCompartment() {
602 cx->compartment = oldCompartment;
/* RAII: enter the given compartment (or the target object's); the PreserveCompartment
   base restores the previous compartment on destruction. */
606 class SwitchToCompartment : public PreserveCompartment {
608 SwitchToCompartment(JSContext *cx, JSCompartment *newCompartment) : PreserveCompartment(cx) {
609 cx->compartment = newCompartment;
612 SwitchToCompartment(JSContext *cx, JSObject *target) : PreserveCompartment(cx) {
613 cx->compartment = target->getCompartment();
/* Invariant guard: asserts (via JS_ASSERT) that cx->compartment is the same at
   destruction as it was at construction. */
617 class AssertCompartmentUnchanged {
619 JSContext * const cx;
620 JSCompartment * const oldCompartment; // compartment captured at construction
621 JS_DECL_USE_GUARD_OBJECT_NOTIFIER
623 AssertCompartmentUnchanged(JSContext *cx JS_GUARD_OBJECT_NOTIFIER_PARAM)
624 : cx(cx), oldCompartment(cx->compartment) {
625 JS_GUARD_OBJECT_NOTIFIER_INIT;
628 ~AssertCompartmentUnchanged() {
629 JS_ASSERT(cx->compartment == oldCompartment);
635 #endif /* jscompartment_h___ */